Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_lpss.c2
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/base/platform-msi.c2
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/base/power/common.c2
-rw-r--r--drivers/base/power/domain.c54
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/vexpress-config.c4
-rw-r--r--drivers/char/hw_random/Kconfig1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c3
-rw-r--r--drivers/clocksource/Kconfig12
-rw-r--r--drivers/clocksource/tcb_clksrc.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt.c15
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq_governor.c11
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c2
-rw-r--r--drivers/cpuidle/coupled.c1
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/crypto/atmel-aes.c16
-rw-r--r--drivers/crypto/caam/ctrl.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c41
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c24
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c279
-rw-r--r--drivers/gpu/drm/etnaviv/common.xml.h59
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c189
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h9
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h26
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c48
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h12
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c10
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c7
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c12
-rw-r--r--drivers/gpu/drm/rockchip/Makefile8
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c24
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c11
-rw-r--r--drivers/hwmon/fam15h_power.c10
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c3
-rw-r--r--drivers/i2c/busses/i2c-piix4.c50
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/adc/Kconfig3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/humidity/dht11.c8
-rw-r--r--drivers/iio/imu/adis_buffer.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig2
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/iio/light/acpi-als.c6
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/pressure/mpl115.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/infiniband/core/ud_header.c7
-rw-r--r--drivers/infiniband/hw/mlx5/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c20
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/io-pgtable-arm.c1
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c8
-rw-r--r--drivers/irqchip/irq-mxs.c1
-rw-r--r--drivers/irqchip/irq-s3c24xx.c2
-rw-r--r--drivers/mailbox/Kconfig1
-rw-r--r--drivers/mailbox/pcc.c8
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/faulty.c18
-rw-r--r--drivers/md/md-cluster.c6
-rw-r--r--drivers/md/raid1.c21
-rw-r--r--drivers/md/raid10.c20
-rw-r--r--drivers/md/raid5.c45
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c9
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/s5k6a3.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c5
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c6
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c4
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c95
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c7
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c95
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx.c5
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c4
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c4
-rw-r--r--drivers/net/ethernet/ezchip/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec.h44
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c133
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h2
-rw-r--r--drivers/net/ethernet/hp/hp100.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c161
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c85
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c46
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h4
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c31
-rw-r--r--drivers/net/ethernet/rocker/rocker.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c16
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c12
-rw-r--r--drivers/net/fddi/defxx.c8
-rw-r--r--drivers/net/geneve.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c33
-rw-r--r--drivers/net/hyperv/netvsc_drv.c67
-rw-r--r--drivers/net/irda/bfin_sir.h3
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/dp83640.c17
-rw-r--r--drivers/net/phy/phy.c46
-rw-r--r--drivers/net/phy/smsc.c54
-rw-r--r--drivers/net/ppp/pptp.c34
-rw-r--r--drivers/net/usb/lan78xx.c132
-rw-r--r--drivers/net/vxlan.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/xen-netfront.c15
-rw-r--r--drivers/nvdimm/namespace_devs.c8
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/of/irq.c18
-rw-r--r--drivers/of/of_mdio.c37
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c4
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/ptp/ptp_ixp46x.c7
-rw-r--r--drivers/s390/cio/chp.c21
-rw-r--r--drivers/s390/cio/chp.h2
-rw-r--r--drivers/s390/cio/chsc.c43
-rw-r--r--drivers/s390/crypto/zcrypt_error.h15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c9
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c20
-rw-r--r--drivers/scsi/hisi_sas/Kconfig2
-rw-r--r--drivers/scsi/sd.c9
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-imx.c8
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/ssb/main.c7
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/meter/ade7753.c12
-rw-r--r--drivers/staging/panel/panel.c34
-rw-r--r--drivers/staging/rdma/Kconfig6
-rw-r--r--drivers/staging/rdma/Makefile3
-rw-r--r--drivers/staging/rdma/amso1100/Kbuild6
-rw-r--r--drivers/staging/rdma/amso1100/Kconfig15
-rw-r--r--drivers/staging/rdma/amso1100/TODO4
-rw-r--r--drivers/staging/rdma/amso1100/c2.c1240
-rw-r--r--drivers/staging/rdma/amso1100/c2.h547
-rw-r--r--drivers/staging/rdma/amso1100/c2_ae.c327
-rw-r--r--drivers/staging/rdma/amso1100/c2_ae.h108
-rw-r--r--drivers/staging/rdma/amso1100/c2_alloc.c142
-rw-r--r--drivers/staging/rdma/amso1100/c2_cm.c458
-rw-r--r--drivers/staging/rdma/amso1100/c2_cq.c437
-rw-r--r--drivers/staging/rdma/amso1100/c2_intr.c219
-rw-r--r--drivers/staging/rdma/amso1100/c2_mm.c377
-rw-r--r--drivers/staging/rdma/amso1100/c2_mq.c175
-rw-r--r--drivers/staging/rdma/amso1100/c2_mq.h106
-rw-r--r--drivers/staging/rdma/amso1100/c2_pd.c90
-rw-r--r--drivers/staging/rdma/amso1100/c2_provider.c862
-rw-r--r--drivers/staging/rdma/amso1100/c2_provider.h182
-rw-r--r--drivers/staging/rdma/amso1100/c2_qp.c1024
-rw-r--r--drivers/staging/rdma/amso1100/c2_rnic.c652
-rw-r--r--drivers/staging/rdma/amso1100/c2_status.h158
-rw-r--r--drivers/staging/rdma/amso1100/c2_user.h82
-rw-r--r--drivers/staging/rdma/amso1100/c2_vq.c260
-rw-r--r--drivers/staging/rdma/amso1100/c2_vq.h63
-rw-r--r--drivers/staging/rdma/amso1100/c2_wr.h1520
-rw-r--r--drivers/staging/rdma/ehca/Kconfig10
-rw-r--r--drivers/staging/rdma/ehca/Makefile16
-rw-r--r--drivers/staging/rdma/ehca/TODO4
-rw-r--r--drivers/staging/rdma/ehca/ehca_av.c279
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes.h481
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes_pSeries.h208
-rw-r--r--drivers/staging/rdma/ehca/ehca_cq.c397
-rw-r--r--drivers/staging/rdma/ehca/ehca_eq.c189
-rw-r--r--drivers/staging/rdma/ehca/ehca_hca.c414
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.c870
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.h77
-rw-r--r--drivers/staging/rdma/ehca/ehca_iverbs.h202
-rw-r--r--drivers/staging/rdma/ehca/ehca_main.c1118
-rw-r--r--drivers/staging/rdma/ehca/ehca_mcast.c131
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.c2202
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.h127
-rw-r--r--drivers/staging/rdma/ehca/ehca_pd.c123
-rw-r--r--drivers/staging/rdma/ehca/ehca_qes.h260
-rw-r--r--drivers/staging/rdma/ehca/ehca_qp.c2256
-rw-r--r--drivers/staging/rdma/ehca/ehca_reqs.c953
-rw-r--r--drivers/staging/rdma/ehca/ehca_sqp.c245
-rw-r--r--drivers/staging/rdma/ehca/ehca_tools.h155
-rw-r--r--drivers/staging/rdma/ehca/ehca_uverbs.c309
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.c949
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.h265
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.c82
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.h90
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns.h68
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns_core.h100
-rw-r--r--drivers/staging/rdma/ehca/hipz_hw.h414
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.c289
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.h289
-rw-r--r--drivers/staging/rdma/ipath/Kconfig16
-rw-r--r--drivers/staging/rdma/ipath/Makefile37
-rw-r--r--drivers/staging/rdma/ipath/TODO5
-rw-r--r--drivers/staging/rdma/ipath/ipath_common.h851
-rw-r--r--drivers/staging/rdma/ipath/ipath_cq.c483
-rw-r--r--drivers/staging/rdma/ipath/ipath_debug.h99
-rw-r--r--drivers/staging/rdma/ipath/ipath_diag.c551
-rw-r--r--drivers/staging/rdma/ipath/ipath_dma.c179
-rw-r--r--drivers/staging/rdma/ipath/ipath_driver.c2784
-rw-r--r--drivers/staging/rdma/ipath/ipath_eeprom.c1183
-rw-r--r--drivers/staging/rdma/ipath/ipath_file_ops.c2619
-rw-r--r--drivers/staging/rdma/ipath/ipath_fs.c415
-rw-r--r--drivers/staging/rdma/ipath/ipath_iba6110.c1939
-rw-r--r--drivers/staging/rdma/ipath/ipath_init_chip.c1062
-rw-r--r--drivers/staging/rdma/ipath/ipath_intr.c1271
-rw-r--r--drivers/staging/rdma/ipath/ipath_kernel.h1374
-rw-r--r--drivers/staging/rdma/ipath/ipath_keys.c270
-rw-r--r--drivers/staging/rdma/ipath/ipath_mad.c1521
-rw-r--r--drivers/staging/rdma/ipath/ipath_mmap.c174
-rw-r--r--drivers/staging/rdma/ipath/ipath_mr.c370
-rw-r--r--drivers/staging/rdma/ipath/ipath_qp.c1079
-rw-r--r--drivers/staging/rdma/ipath/ipath_rc.c1969
-rw-r--r--drivers/staging/rdma/ipath/ipath_registers.h512
-rw-r--r--drivers/staging/rdma/ipath/ipath_ruc.c733
-rw-r--r--drivers/staging/rdma/ipath/ipath_sdma.c818
-rw-r--r--drivers/staging/rdma/ipath/ipath_srq.c380
-rw-r--r--drivers/staging/rdma/ipath/ipath_stats.c347
-rw-r--r--drivers/staging/rdma/ipath/ipath_sysfs.c1237
-rw-r--r--drivers/staging/rdma/ipath/ipath_uc.c547
-rw-r--r--drivers/staging/rdma/ipath/ipath_ud.c579
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_pages.c228
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_sdma.c874
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_sdma.h52
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs.c2376
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs.h941
-rw-r--r--drivers/staging/rdma/ipath/ipath_verbs_mcast.c363
-rw-r--r--drivers/staging/rdma/ipath/ipath_wc_ppc64.c49
-rw-r--r--drivers/staging/rdma/ipath/ipath_wc_x86_64.c144
-rw-r--r--drivers/staging/speakup/Kconfig2
-rw-r--r--drivers/staging/speakup/main.c21
-rw-r--r--drivers/staging/speakup/selection.c5
-rw-r--r--drivers/staging/speakup/serialio.c13
-rw-r--r--drivers/tty/n_tty.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c29
-rw-r--r--drivers/tty/tty_io.c44
-rw-r--r--drivers/tty/tty_mutex.c8
-rw-r--r--drivers/tty/vt/vt.c1
-rw-r--r--drivers/usb/class/cdc-acm.c14
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/core.c14
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/host/xhci-ext-caps.h4
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c16
-rw-r--r--drivers/usb/host/xhci-mtk.c23
-rw-r--r--drivers/usb/host/xhci-pci.c56
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c10
-rw-r--r--drivers/usb/host/xhci.c4
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/ux500.c7
-rw-r--r--drivers/usb/phy/phy-msm-usb.c37
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/mxu11x0.c20
-rw-r--r--drivers/usb/serial/option.c18
-rw-r--r--drivers/usb/serial/visor.c11
-rw-r--r--drivers/vfio/vfio.c24
-rw-r--r--drivers/virtio/virtio_pci_common.c2
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/max63xx_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c3
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/xen/tmem.c2
369 files changed, 2757 insertions, 54572 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index c570b1d9f094..0872d5fecb82 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -880,7 +880,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
break;
case BUS_NOTIFY_DRIVER_NOT_BOUND:
case BUS_NOTIFY_UNBOUND_DRIVER:
- pdev->dev.pm_domain = NULL;
+ dev_pm_domain_set(&pdev->dev, NULL);
break;
case BUS_NOTIFY_ADD_DEVICE:
dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
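The hunk above replaces the raw write to pdev->dev.pm_domain with the dev_pm_domain_set() helper; the helper's bound-device check is relaxed for the clearing case in the drivers/base/power/common.c hunk further down. A minimal sketch of the calling pattern, with a hypothetical domain and notifier (not the actual acpi_lpss code):

static struct dev_pm_domain example_pm_domain;	/* hypothetical */

static int example_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(dev, &example_pm_domain);	/* attach on add */
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(dev, NULL);	/* clear via the helper, never a raw write */
		break;
	}
	return NOTIFY_OK;
}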
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 90e2d54be526..1316ddd92fac 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "Dell Inspiron 5737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
- },
- },
/*
* These models have a working acpi_video backlight control, and using
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 47c43386786b..279e53989374 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -284,6 +284,7 @@ out_free_priv_data:
return err;
}
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
/**
* platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -301,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
msi_domain_free_irqs(dev->msi_domain, dev);
platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
/**
* platform_msi_get_host_data - Query the private data associated with
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 73d6e5d39e33..f437afa17f2b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -558,10 +558,15 @@ static int platform_drv_probe(struct device *_dev)
return ret;
ret = dev_pm_domain_attach(_dev, true);
- if (ret != -EPROBE_DEFER && drv->probe) {
- ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
+ if (ret != -EPROBE_DEFER) {
+ if (drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ } else {
+ /* don't fail if just dev_pm_domain_attach failed */
+ ret = 0;
+ }
}
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 93ed14cc2252..f6a9ad52cbbf 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -146,7 +146,7 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
if (dev->pm_domain == pd)
return;
- WARN(device_is_bound(dev),
+ WARN(pd && device_is_bound(dev),
"PM domains can only be changed for unbound devices\n");
dev->pm_domain = pd;
device_pm_check_callbacks(dev);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6ac9a7f33b64..301b785f9f56 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
/**
* genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
*
* Queue up the execution of genpd_poweroff() unless it's already been done
* before.
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
queue_work(pm_wq, &genpd->power_off_work);
}
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
/**
- * __genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
*
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
{
struct gpd_link *link;
int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
* with it.
*/
list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
+ struct generic_pm_domain *master = link->master;
+
+ genpd_sd_counter_inc(master);
+
+ mutex_lock_nested(&master->lock, depth + 1);
+ ret = genpd_poweron(master, depth + 1);
+ mutex_unlock(&master->lock);
- ret = genpd_poweron(link->master);
if (ret) {
- genpd_sd_counter_dec(link->master);
+ genpd_sd_counter_dec(master);
goto err;
}
}
@@ -223,20 +227,6 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
return ret;
}
-/**
- * genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- */
-static int genpd_poweron(struct generic_pm_domain *genpd)
-{
- int ret;
-
- mutex_lock(&genpd->lock);
- ret = __genpd_poweron(genpd);
- mutex_unlock(&genpd->lock);
- return ret;
-}
-
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -484,7 +474,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
}
mutex_lock(&genpd->lock);
- ret = __genpd_poweron(genpd);
+ ret = genpd_poweron(genpd, 0);
mutex_unlock(&genpd->lock);
if (ret)
@@ -1339,8 +1329,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
if (!link)
return -ENOMEM;
- mutex_lock(&genpd->lock);
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&subdomain->lock);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1353,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
genpd_sd_counter_inc(genpd);
out:
- mutex_unlock(&subdomain->lock);
mutex_unlock(&genpd->lock);
+ mutex_unlock(&subdomain->lock);
if (ret)
kfree(link);
return ret;
@@ -1385,7 +1375,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ mutex_lock(&subdomain->lock);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1389,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (link->slave != subdomain)
continue;
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
list_del(&link->master_node);
list_del(&link->slave_node);
kfree(link);
if (subdomain->status != GPD_STATE_POWER_OFF)
genpd_sd_counter_dec(genpd);
- mutex_unlock(&subdomain->lock);
-
ret = 0;
break;
}
out:
mutex_unlock(&genpd->lock);
+ mutex_unlock(&subdomain->lock);
return ret;
}
@@ -1818,8 +1806,10 @@ int genpd_dev_pm_attach(struct device *dev)
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- ret = genpd_poweron(pd);
+ mutex_lock(&pd->lock);
+ ret = genpd_poweron(pd, 0);
+ mutex_unlock(&pd->lock);
out:
return ret ? -EPROBE_DEFER : 0;
}
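The domain.c rework folds __genpd_poweron() into genpd_poweron() and threads a depth argument through the recursion so each level of the master hierarchy takes its mutex with a distinct lockdep subclass, and it also swaps the subdomain/genpd lock order in the add/remove-subdomain paths. A stripped-down sketch of the nesting idiom, with hypothetical types (the real code walks struct gpd_link entries rather than linking the domains directly):

struct node {
	struct mutex lock;
	struct list_head parents;	/* nodes this one must power up first */
	struct list_head sibling;	/* our link in a parent's list */
};

static int power_up_recursive(struct node *n, unsigned int depth)
{
	struct node *parent;
	int ret = 0;

	list_for_each_entry(parent, &n->parents, sibling) {
		/* each recursion level uses its own lockdep subclass */
		mutex_lock_nested(&parent->lock, depth + 1);
		ret = power_up_recursive(parent, depth + 1);
		mutex_unlock(&parent->lock);
		if (ret)
			break;
	}
	return ret;
}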
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 129d47bcc5fc..9a92c072a485 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,7 +132,7 @@ config SUNXI_RSB
and AC100/AC200 ICs.
config UNIPHIER_SYSTEM_BUS
- bool "UniPhier System Bus driver"
+ tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
default y
help
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 6575c0fe6a4e..c3cb76b363c6 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -192,8 +192,10 @@ static int __init vexpress_config_init(void)
/* Need the config devices early, before the "normal" devices... */
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
err = vexpress_config_populate(node);
- if (err)
+ if (err) {
+ of_node_put(node);
break;
+ }
}
return err;
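The vexpress-config fix applies the usual OF iterator rule: for_each_compatible_node() takes a reference on each node it hands to the loop body and drops it on the next iteration, so a caller that breaks out early owns the last reference and must drop it. The idiom in isolation (compatible string and callback are hypothetical):

struct device_node *node;
int err = 0;

for_each_compatible_node(node, NULL, "vendor,example-cfg") {
	err = populate_one(node);	/* hypothetical per-node setup */
	if (err) {
		of_node_put(node);	/* balance the reference held by the iterator */
		break;
	}
}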
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dbf22719462f..ff00331bff49 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -372,6 +372,7 @@ config HW_RANDOM_XGENE
config HW_RANDOM_STM32
tristate "STMicroelectronics STM32 random number generator"
depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
+ depends on HAS_IOMEM
help
This driver provides kernel-side support for the Random Number
Generator hardware found on STM32 microcontrollers.
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 9fda22e3387e..7fddd8696211 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -68,6 +68,7 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/acpi.h>
#ifdef CONFIG_PARISC
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
@@ -2054,8 +2055,6 @@ static int hardcode_find_bmc(void)
#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-
/*
* Once we get an ACPI failure, we don't try any more, because we go
* through the tables sequentially. Once we don't find a table, there
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 56777f04d2d9..33db7406c0e2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -30,6 +30,8 @@ config CLKSRC_MMIO
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ depends on HAS_IOMEM
help
Enables the support for the digicolor timer driver.
@@ -55,6 +57,7 @@ config ARMADA_370_XP_TIMER
bool "Armada 370 and XP timer driver" if COMPILE_TEST
depends on ARM
select CLKSRC_OF
+ select CLKSRC_MMIO
help
Enables the support for the Armada 370 and XP timer driver.
@@ -76,6 +79,7 @@ config ORION_TIMER
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
select CLKSRC_MMIO
help
Enables support for the Sun4i timer.
@@ -89,6 +93,7 @@ config SUN5I_HSTIMER
config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
+ select CLKSRC_MMIO
depends on ARM
help
Enables support for the Tegra driver.
@@ -96,6 +101,7 @@ config TEGRA_TIMER
config VT8500_TIMER
bool "VT8500 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
help
Enables support for the VT8500 driver.
@@ -131,6 +137,7 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
help
Use the always on PRCMU Timer as clocksource
@@ -248,6 +255,7 @@ config CLKSRC_EXYNOS_MCT
config CLKSRC_SAMSUNG_PWM
bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -257,12 +265,14 @@ config CLKSRC_SAMSUNG_PWM
config FSL_FTM_TIMER
bool "Freescale FlexTimer Module driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
select CLKSRC_MMIO
help
Support for Freescale FlexTimer Module (FTM) timer.
config VF_PIT_TIMER
bool
+ select CLKSRC_MMIO
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
@@ -360,6 +370,7 @@ config CLKSRC_TANGO_XTAL
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
select CLKSRC_MMIO
help
This enables OST0 support available on PXA and SA-11x0
@@ -394,6 +405,7 @@ config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select CLKSRC_OF if OF
depends on HAS_IOMEM
+ select CLKSRC_MMIO
help
Enable this option to use the Low Power controller timer
as clocksource.
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 6ee91401918e..4da2af9694a2 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
+ if (!clockevent_state_detached(d))
+ clk_disable(tcd->clk);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 9bc37c437874..0ca74d070058 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
try_again:
cpu_reg = regulator_get_optional(cpu_dev, reg);
- if (IS_ERR(cpu_reg)) {
+ ret = PTR_ERR_OR_ZERO(cpu_reg);
+ if (ret) {
/*
* If cpu's regulator supply node is present, but regulator is
* not yet registered, we should try defering probe.
*/
- if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+ if (ret == -EPROBE_DEFER) {
dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
cpu);
- return -EPROBE_DEFER;
+ return ret;
}
/* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ try_again:
goto try_again;
}
- dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
- cpu, PTR_ERR(cpu_reg));
+ dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
}
cpu_clk = clk_get(cpu_dev, NULL);
- if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR_OR_ZERO(cpu_clk);
+ if (ret) {
/* put regulator */
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
- ret = PTR_ERR(cpu_clk);
-
/*
* If cpu's clk node is present, but clock is not yet
* registered, we should try defering probe.
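The cpufreq-dt cleanup above relies on PTR_ERR_OR_ZERO(), which evaluates to 0 for a valid pointer and to the encoded error otherwise, letting one int act as both the error code and the validity test. A self-contained sketch (the helper name is hypothetical):

static int example_get_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret = PTR_ERR_OR_ZERO(clk);	/* 0 on success, -EPROBE_DEFER etc. on error */

	if (ret)
		return ret;

	*out = clk;
	return 0;
}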
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c35e7da1ed7a..e979ec78b695 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
bool active)
{
do {
- policy = list_next_entry(policy, policy_list);
-
/* No more policies in the list */
- if (&policy->policy_list == &cpufreq_policy_list)
+ if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
return NULL;
+
+ policy = list_next_entry(policy, policy_list);
} while (!suitable_policy(policy, active));
return policy;
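The next_policy() change is the standard guard against walking past the end of a list: check list_is_last() on the current entry before list_next_entry(), otherwise the list head itself ends up treated as an entry. In isolation (hypothetical names):

if (list_is_last(&pol->policy_list, &all_policies))
	return NULL;			/* no further policies */
pol = list_next_entry(pol, policy_list);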
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index bab3a514ec12..e0d111024d48 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
if (!have_governor_per_policy())
cdata->gdbs_data = dbs_data;
+ policy->governor_data = dbs_data;
+
ret = sysfs_create_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
if (ret)
goto reset_gdbs_data;
- policy->governor_data = dbs_data;
-
return 0;
reset_gdbs_data:
+ policy->governor_data = NULL;
+
if (!have_governor_per_policy())
cdata->gdbs_data = NULL;
cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
if (!cdbs->shared || cdbs->shared->policy)
return -EBUSY;
- policy->governor_data = NULL;
if (!--dbs_data->usage_count) {
sysfs_remove_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
+ policy->governor_data = NULL;
+
if (!have_governor_per_policy())
cdata->gdbs_data = NULL;
cdata->exit(dbs_data, policy->governor->initialized == 1);
kfree(dbs_data);
+ } else {
+ policy->governor_data = NULL;
}
free_common_dbs_info(policy, cdata);
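The governor init/exit hunks enforce an ordering rule rather than new logic: policy->governor_data must be valid before sysfs_create_group() makes attributes visible whose show/store handlers dereference it, and it may only be cleared once those attributes are gone. Condensed, with a hypothetical kobject and attribute group:

policy->governor_data = dbs_data;		/* publish before the attributes exist */
ret = sysfs_create_group(kobj, attr_group);
if (ret)
	policy->governor_data = NULL;		/* roll back; nothing was exposed */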
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 1d99c97defa9..096377232747 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
}
}
#else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
return 0;
}
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 344058f8501a..d5657d50ac40 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
#define CPUIDLE_COUPLED_NOT_IDLE (-1)
-static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
/*
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 046423b0c5ca..f996efc56605 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* be frozen safely.
*/
index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
- if (index >= 0)
+ if (index > 0)
enter_freeze_proper(drv, dev, index);
return index;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6dd3317ca365..3eb3f1279fb7 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -400,7 +400,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
int err;
- err = clk_prepare_enable(dd->iclk);
+ err = clk_enable(dd->iclk);
if (err)
return err;
@@ -430,7 +430,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
return 0;
}
@@ -448,7 +448,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
if (dd->is_async)
@@ -2091,10 +2091,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto res_err;
}
- err = atmel_aes_hw_version_init(aes_dd);
+ err = clk_prepare(aes_dd->iclk);
if (err)
goto res_err;
+ err = atmel_aes_hw_version_init(aes_dd);
+ if (err)
+ goto iclk_unprepare;
+
atmel_aes_get_cap(aes_dd);
err = atmel_aes_buff_init(aes_dd);
@@ -2127,6 +2131,8 @@ err_algs:
err_aes_dma:
atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
+iclk_unprepare:
+ clk_unprepare(aes_dd->iclk);
res_err:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
@@ -2155,6 +2161,8 @@ static int atmel_aes_remove(struct platform_device *pdev)
atmel_aes_dma_cleanup(aes_dd);
atmel_aes_buff_cleanup(aes_dd);
+ clk_unprepare(aes_dd->iclk);
+
return 0;
}
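The atmel-aes change splits clk_prepare_enable(): the sleeping clk_prepare()/clk_unprepare() pair moves to probe/remove, leaving only the non-sleeping clk_enable()/clk_disable() on the per-request path. The division of labour in the clk API, sketched with hypothetical names:

static int example_probe(struct platform_device *pdev)
{
	struct clk *iclk = devm_clk_get(&pdev->dev, "iclk");

	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	return clk_prepare(iclk);	/* may sleep; do it once at probe time */
}

static int example_one_request(struct clk *iclk)
{
	int err = clk_enable(iclk);	/* atomic-safe; per request */

	if (err)
		return err;
	/* program the hardware here */
	clk_disable(iclk);
	return 0;
}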
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 8abb4bc548cc..69d4a1326fee 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -534,8 +534,8 @@ static int caam_probe(struct platform_device *pdev)
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 66f729eaf00b..20c9539abc36 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o
@@ -34,6 +34,7 @@ amdgpu-y += \
# add GMC block
amdgpu-y += \
+ gmc_v7_0.o \
gmc_v8_0.o
# add IH block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 313b0cc8d676..82edf95b7740 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2278,60 +2278,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_dpm_get_temperature(adev) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev))
+ (adev)->pm.funcs->get_temperature((adev)))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m))
+ (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev))
+ (adev)->pm.funcs->get_fan_control_mode((adev)))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
+ (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
+ (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
#define amdgpu_dpm_get_sclk(adev, l) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l))
+ (adev)->pm.funcs->get_sclk((adev), (l)))
#define amdgpu_dpm_get_mclk(adev, l) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l))
+ (adev)->pm.funcs->get_mclk((adev), (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l))
+ (adev)->pm.funcs->force_performance_level((adev), (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g))
+ (adev)->pm.funcs->powergate_uvd((adev), (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g))
+ (adev)->pm.funcs->powergate_vce((adev), (g)))
#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
- (adev)->pp_enabled ? \
+ ((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
+ (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
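Every amdgpu_dpm_* macro above gains one outer pair of parentheses; without it, a conditional-expression macro body binds to operators at the expansion site. A contrived illustration of the failure mode:

#define BAD_MAX(a, b)	(a) > (b) ? (a) : (b)
#define GOOD_MAX(a, b)	((a) > (b) ? (a) : (b))

int bad  = 2 * BAD_MAX(1, 3);	/* 2 * (1) > (3) ? (1) : (3)  evaluates to 3 */
int good = 2 * GOOD_MAX(1, 3);	/* 2 * ((1) > (3) ? (1) : (3)) evaluates to 6 */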
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0e1376317683..362bedc9e507 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version
};
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
return (struct kfd2kgd_calls *)&kfd2kgd;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 79fa5c7de856..04b744d64b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version
};
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
return (struct kfd2kgd_calls *)&kfd2kgd;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6f89f8e034d0..b882e8175615 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -478,9 +478,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
unsigned i;
- amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
if (!error) {
+ amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
+
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b5dbbb573491..9c1af8976bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -256,11 +256,11 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
/* topaz */
- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
/* tonga */
{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index cfb6caad2a73..919146780a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -333,6 +333,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
if (!adev->mode_info.mode_config_initialized)
return 0;
+ /* don't init fbdev if there are no connectors */
+ if (list_empty(&adev->ddev->mode_config.connector_list))
+ return 0;
+
/* select 8 bpp console on low vram cards */
if (adev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c3ce103b6a33..b8fbbd7699e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_OA);
bo->flags = flags;
+
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
- if (lpfn && lpfn < bo->placements[i].lpfn)
+ if (!bo->placements[i].lpfn ||
+ (lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 5ee9a0690278..b9d0d55f6b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -99,13 +99,24 @@ static int amdgpu_pp_early_init(void *handle)
#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_FIJI:
- adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
- break;
- default:
- adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
- break;
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+ break;
+ /* These chips don't have powerplay implemenations */
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_KAVERI:
+ case CHIP_TOPAZ:
+ default:
+ adev->pp_enabled = false;
+ break;
}
#else
adev->pp_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 78e9b0f14661..d1f234dd2126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -487,7 +487,7 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
seq_printf(m, "rptr: 0x%08x [%5d]\n",
rptr, rptr);
- rptr_next = ~0;
+ rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
ring->wptr, ring->wptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8a1752ff3d8e..55cf05e1c81c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -808,7 +808,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
flags |= AMDGPU_PTE_SNOOPED;
}
- if (adev->asic_type >= CHIP_TOPAZ)
+ if (adev->asic_type >= CHIP_TONGA)
flags |= AMDGPU_PTE_EXECUTABLE;
flags |= AMDGPU_PTE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index aefc668e6b5d..9599f7559b3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1282,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8);
- unsigned pd_size, pd_entries, pts_size;
+ unsigned pd_size, pd_entries;
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1300,8 +1300,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
pd_entries = amdgpu_vm_num_pdes(adev);
/* allocate page table array */
- pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
- vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+ vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
return -ENOMEM;
@@ -1361,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
- kfree(vm->page_tables);
+ drm_free_large(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 72793f93e2fc..6c76139de1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle)
return 0;
}
+static int gfx_v7_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+ if (r)
+ return r;
+
+ return 0;
+}
+
static int gfx_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
gfx_v7_0_cp_enable(adev, false);
gfx_v7_0_rlc_stop(adev);
gfx_v7_0_fini_pg(adev);
@@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.early_init = gfx_v7_0_early_init,
- .late_init = NULL,
+ .late_init = gfx_v7_0_late_init,
.sw_init = gfx_v7_0_sw_init,
.sw_fini = gfx_v7_0_sw_fini,
.hw_init = gfx_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 13235d84e5a6..8f8ec37ecd88 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- if (adev->asic_type != CHIP_STONEY) {
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
@@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (r)
return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_MEC1);
- if (r)
- return -EINVAL;
+ if (adev->asic_type == CHIP_TOPAZ) {
+ r = gfx_v8_0_cp_compute_load_microcode(adev);
+ if (r)
+ return r;
+ } else {
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_CP_MEC1);
+ if (r)
+ return -EINVAL;
+ }
}
}
@@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
gfx_v8_0_cp_compute_fini(adev);
@@ -4186,7 +4194,18 @@ static int gfx_v8_0_soft_reset(void *handle)
gfx_v8_0_cp_gfx_enable(adev, false);
/* Disable MEC parsing/prefetching */
- /* XXX todo */
+ gfx_v8_0_cp_compute_enable(adev, false);
+
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp,
+ GMCON_DEBUG, GFX_STALL, 1);
+ tmp = REG_SET_FIELD(tmp,
+ GMCON_DEBUG, GFX_CLEAR, 1);
+ WREG32(mmGMCON_DEBUG, tmp);
+
+ udelay(50);
+ }
if (grbm_soft_reset) {
tmp = RREG32(mmGRBM_SOFT_RESET);
@@ -4215,6 +4234,16 @@ static int gfx_v8_0_soft_reset(void *handle)
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
}
+
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp,
+ GMCON_DEBUG, GFX_STALL, 0);
+ tmp = REG_SET_FIELD(tmp,
+ GMCON_DEBUG, GFX_CLEAR, 0);
+ WREG32(mmGMCON_DEBUG, tmp);
+ }
+
/* Wait a little for things to settle down */
udelay(50);
gfx_v8_0_print_status((void *)adev);
@@ -4308,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+ if (r)
+ return r;
+
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v8_0_do_edc_gpr_workarounds(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 3f956065d069..8aa2991ab379 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ amdgpu_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+ break;
+ default:
+ break;
+ }
+}
/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
*
* @adev: amdgpu_device pointer
*
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_HAWAII:
chip_name = "hawaii";
break;
+ case CHIP_TOPAZ:
+ chip_name = "topaz";
+ break;
case CHIP_KAVERI:
case CHIP_KABINI:
return 0;
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ if (adev->asic_type == CHIP_TOPAZ)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v7_0_init_golden_registers(adev);
+
gmc_v7_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c0c9a0101eb4..3efd45546241 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -42,9 +42,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 golden_settings_iceland_a11[] =
-{
- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
static const u32 cz_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
- break;
case CHIP_FIJI:
amdgpu_program_register_sequence(adev,
fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- chip_name = "topaz";
- break;
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_FIJI:
- chip_name = "fiji";
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
return 0;
@@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle)
gmc_v8_0_mc_program(adev);
- if (!(adev->flags & AMD_IS_APU)) {
+ if (adev->asic_type == CHIP_TONGA) {
r = gmc_v8_0_mc_load_microcode(adev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 966d4b2ed9da..090486c18249 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
return -EINVAL;
}
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
+ UCODE_ID_CP_MEC_JT1_MASK;
+
if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index f4a1346525fe..0497784b3652 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
static int tonga_dpm_suspend(void *handle)
{
- return 0;
+ return tonga_dpm_hw_fini(handle);
}
static int tonga_dpm_resume(void *handle)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = tonga_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
-fail:
- mutex_unlock(&adev->pm.mutex);
- return ret;
+ return tonga_dpm_hw_init(handle);
}
static int tonga_dpm_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 652e76644c31..89f5a1ff6f43 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -61,6 +61,7 @@
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
},
{
.type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
+ .major = 7,
+ .minor = 4,
.rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
+ .funcs = &gmc_v7_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_IH,
@@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle)
break;
case CHIP_FIJI:
adev->has_uvd = true;
- adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
- AMDGPU_CG_SUPPORT_VCE_MGCG;
+ adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 9be007081b72..a902ae037398 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work)
kfree(p);
- kfree((void *)work);
+ kfree(work);
}
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 8f5d5edcf193..aa67244a77ae 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -64,6 +64,11 @@ static int pp_sw_init(void *handle)
if (ret == 0)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
+ if (ret)
+ printk(KERN_ERR "amdgpu: powerplay initialization failed\n");
+ else
+ printk(KERN_INFO "amdgpu: powerplay initialized\n");
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 873a8d264d5c..ec222c665602 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -272,6 +272,9 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
UCODE_ID_CP_MEC_JT1_MASK |
UCODE_ID_CP_MEC_JT2_MASK;
+ if (smumgr->chip_id == CHIP_STONEY)
+ fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
cz_request_smu_load_fw(smumgr);
cz_check_fw_load_finish(smumgr, fw_to_check);
@@ -282,7 +285,7 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
return ret;
}
-static uint8_t cz_translate_firmware_enum_to_arg(
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
enum cz_scratch_entry firmware_enum)
{
uint8_t ret = 0;
@@ -292,7 +295,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
ret = UCODE_ID_SDMA0;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- ret = UCODE_ID_SDMA1;
+ if (smumgr->chip_id == CHIP_STONEY)
+ ret = UCODE_ID_SDMA0;
+ else
+ ret = UCODE_ID_SDMA1;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
ret = UCODE_ID_CP_CE;
@@ -307,7 +313,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
ret = UCODE_ID_CP_MEC_JT1;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- ret = UCODE_ID_CP_MEC_JT2;
+ if (smumgr->chip_id == CHIP_STONEY)
+ ret = UCODE_ID_CP_MEC_JT1;
+ else
+ ret = UCODE_ID_CP_MEC_JT2;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
ret = UCODE_ID_GMCON_RENG;
@@ -396,7 +405,7 @@ static int cz_smu_populate_single_scratch_task(
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -433,7 +442,7 @@ static int cz_smu_populate_single_ucode_load_task(
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -509,8 +518,14 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+
+ if (smumgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(smumgr,
+ CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+ else
+ cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
@@ -551,7 +566,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (smumgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(smumgr,
+ CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+ else
+ cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
@@ -561,7 +580,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (smumgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(smumgr,
+ CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+ else
+ cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
@@ -618,7 +641,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
- firmware_type = cz_translate_firmware_enum_to_arg(
+ firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
firmware_list[i]);
ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
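All of the Stoney special cases above implement a single rule: Stoney has one SDMA engine and one MEC jump table, so requests for SDMA1 or CP_MEC_JT2 are folded back onto SDMA0 and CP_MEC_JT1. A condensed sketch of that mapping (cz_map_ucode_for_chip is a hypothetical name; the enum values are the ones used in this file):

static uint8_t cz_map_ucode_for_chip(uint32_t chip_id, uint8_t ucode)
{
	if (chip_id != CHIP_STONEY)
		return ucode;

	switch (ucode) {
	case UCODE_ID_SDMA1:
		return UCODE_ID_SDMA0;		/* only one SDMA engine */
	case UCODE_ID_CP_MEC_JT2:
		return UCODE_ID_CP_MEC_JT1;	/* only one MEC jump table */
	default:
		return ucode;
	}
}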
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 57cccd68ca52..7c523060a076 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -946,9 +946,23 @@ static void wait_for_fences(struct drm_device *dev,
}
}
-static bool framebuffer_changed(struct drm_device *dev,
- struct drm_atomic_state *old_state,
- struct drm_crtc *crtc)
+/**
+ * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ * @crtc: DRM crtc
+ *
+ * Checks whether the framebuffer used for this CRTC changes as a result of
+ * the atomic update. This is useful for drivers which cannot use
+ * drm_atomic_helper_wait_for_vblanks() and need to reimplement its
+ * functionality.
+ *
+ * Returns:
+ * true if the framebuffer changed.
+ */
+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
+ struct drm_atomic_state *old_state,
+ struct drm_crtc *crtc)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
@@ -965,6 +979,7 @@ static bool framebuffer_changed(struct drm_device *dev,
return false;
}
+EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);
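With the helper exported, drivers that cannot use drm_atomic_helper_wait_for_vblanks() can reuse the same check in their own commit path. A hedged usage sketch — my_driver_wait_for_flip_done() and the surrounding driver are invented for illustration:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void my_driver_wait_for_flip_done(struct drm_crtc *crtc);	/* placeholder */

static void my_driver_wait_for_flips(struct drm_device *dev,
				     struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		/* nothing scanned out changed on this CRTC, no need to wait */
		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, crtc))
			continue;

		my_driver_wait_for_flip_done(crtc);
	}
}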
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
@@ -999,7 +1014,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
if (old_state->legacy_cursor_update)
continue;
- if (!framebuffer_changed(dev, old_state, crtc))
+ if (!drm_atomic_helper_framebuffer_changed(dev,
+ old_state, crtc))
continue;
ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6ed90a2437e5..8ae13de272c4 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -803,6 +803,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
return mstb;
}
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ if (mstb->port_parent) {
+ if (list_empty(&mstb->port_parent->next))
+ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+ }
+ kfree(mstb);
+}
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
@@ -810,6 +822,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
bool wake_tx = false;
/*
+ * Re-initialize the kref so that ports which still hold a reference
+ * can free this branch device once it is no longer needed.
+ */
+ kref_init(kref);
+
+ if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+ kref_get(&mstb->port_parent->kref);
+
+ /*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
* device at this point.
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
if (wake_tx)
wake_up(&mstb->mgr->tx_waitq);
- kfree(mstb);
+
+ kref_put(kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref)
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
+ kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link;
}
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret;
- if (port->dpcd_rev >= 0x12) {
- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
- if (!port->guid_valid) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- port,
- DP_GUID,
- 16, port->guid);
- port->guid_valid = true;
+
+ memcpy(mstb->guid, guid, 16);
+
+ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+ if (mstb->port_parent) {
+ ret = drm_dp_send_dpcd_write(
+ mstb->mgr,
+ mstb->port_parent,
+ DP_GUID,
+ 16,
+ mstb->guid);
+ } else {
+ ret = drm_dp_dpcd_write(
+ mstb->mgr->aux,
+ DP_GUID,
+ mstb->guid,
+ 16);
}
}
}
@@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
- memcpy(port->guid, port_msg->peer_guid, 16);
/* manage mstb port lists with mgr lock - take a reference
for this list */
@@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port);
goto out;
}
- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(port->connector);
- }
+
+ drm_mode_connector_set_tile_property(port->connector);
+
(*mstb->mgr->cbs->register_connector)(port->connector);
}
-
out:
/* put reference to this port */
drm_dp_put_port(port);
@@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
port->ddps = conn_stat->displayport_device_plug_status;
if (old_ddps != port->ddps) {
+ dowork = true;
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
- dowork = true;
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
+ if (memcmp(mstb->guid, guid, 16) == 0)
+ return mstb;
+
list_for_each_entry(port, &mstb->ports, next) {
if (!port->mstb)
continue;
- if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
- return port->mstb;
-
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
@@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
/* find the port by iterating down */
mutex_lock(&mgr->lock);
- if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
- mstb = mgr->mst_primary;
- else
- mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
if (mstb)
kref_get(&mstb->kref);
@@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
if (port->input)
continue;
- if (!port->ddps)
+ if (!port->ddps) {
+ if (port->cached_edid) {
+ kfree(port->cached_edid);
+ port->cached_edid = NULL;
+ }
continue;
+ }
if (!port->available_pbn)
drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
drm_dp_check_and_send_link_address(mgr, mstb_child);
drm_dp_put_mst_branch_device(mstb_child);
}
+ } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
+ port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
+ if (!port->cached_edid) {
+ port->cached_edid =
+ drm_get_edid(port->connector, &port->aux.ddc);
+ }
}
}
}
@@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_put_mst_branch_device(mstb);
}
+
+ (*mgr->cbs->hotplug)(mgr);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
+
+ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
- (*mgr->cbs->hotplug)(mgr);
}
} else {
mstb->link_address_sent = false;
@@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+ if (!mstb->port_parent)
+ return NULL;
+
+ if (mstb->port_parent->mstb != mstb)
+ return mstb->port_parent;
+
+ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int *port_num)
+{
+ struct drm_dp_mst_branch *rmstb = NULL;
+ struct drm_dp_mst_port *found_port;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary) {
+ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+ if (found_port) {
+ rmstb = found_port->parent;
+ kref_get(&rmstb->kref);
+ *port_num = found_port->port_num;
+ }
+ }
+ mutex_unlock(&mgr->lock);
+ return rmstb;
+}
+
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
@@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret;
+ int len, ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
+ port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
- if (!mstb)
- return -EINVAL;
+ if (!mstb) {
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+ if (!mstb)
+ return -EINVAL;
+ }
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
@@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
sinks[i] = i;
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port->port_num,
+ len = build_allocate_payload(txmsg, port_num,
id,
pbn, port->num_sdp_streams, sinks);
@@ -1983,31 +2057,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
-
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
-
- /* sort out guid */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
- if (ret != 16) {
- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
- goto out_unlock;
- }
-
- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
- if (!mgr->guid_valid) {
- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
- mgr->guid_valid = true;
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
queue_work(system_long_wq, &mgr->work);
@@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
drm_dp_update_port(mstb, &msg.u.conn_stat);
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
- (*mgr->cbs->hotplug)(mgr);
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
if (!mstb)
@@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
- /* for logical ports - cache the EDID */
- if (port->port_num >= 8 && !port->cached_edid) {
- port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
- }
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
- else {
- edid = drm_get_edid(connector, &port->aux.ddc);
- drm_mode_connector_set_tile_property(connector);
- }
+
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port);
return edid;
@@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
return true;
}
}
@@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- fixed20_12 pix_bw;
- fixed20_12 fbpp;
- fixed20_12 result;
- fixed20_12 margin, tmp;
- u32 res;
-
- pix_bw.full = dfixed_const(clock);
- fbpp.full = dfixed_const(bpp);
- tmp.full = dfixed_const(8);
- fbpp.full = dfixed_div(fbpp, tmp);
-
- result.full = dfixed_mul(pix_bw, fbpp);
- margin.full = dfixed_const(54);
- tmp.full = dfixed_const(64);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_div(result, margin);
-
- margin.full = dfixed_const(1006);
- tmp.full = dfixed_const(1000);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_mul(result, margin);
-
- result.full = dfixed_div(result, tmp);
- result.full = dfixed_ceil(result);
- res = dfixed_trunc(result);
- return res;
+ u64 kbps;
+ s64 peak_kbps;
+ u32 numerator;
+ u32 denominator;
+
+ kbps = clock * bpp;
+
+ /*
+ * Margin: 5300ppm + 300ppm ~ 0.6% as per spec, so the factor is 1.006.
+ * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a common
+ * multiplier so that the PBN comes out as an integer for all link
+ * rate/lane count combinations.
+ * The calculation is therefore:
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps *= 8 (convert to bytes)
+ */
+
+ numerator = 64 * 1006;
+ denominator = 54 * 8 * 1000 * 1000;
+
+ kbps *= numerator;
+ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+ return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
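As a cross-check of the new arithmetic, here is the first self-test case below (clock 154000 kHz, 30 bpp, expected PBN 689) worked through with plain 64-bit integers — illustration only, the function itself uses the drm_fixp_* fixed-point helpers:

#include <linux/math64.h>

/*
 *   kbps        = 154000 * 30          = 4,620,000
 *   numerator   = 64 * 1006            = 64,384
 *   denominator = 54 * 8 * 1000 * 1000 = 432,000,000
 *   PBN         = ceil(4,620,000 * 64,384 / 432,000,000)
 *               = ceil(688.55...)      = 689
 */
static u64 pbn_worked_example(u64 clock_khz, u64 bpp)
{
	u64 kbps = clock_khz * bpp;
	u64 num  = 64 * 1006;
	u64 den  = 54 * 8 * 1000 * 1000;

	return div64_u64(kbps * num + den - 1, den);	/* ceiling division */
}
/* pbn_worked_example(154000, 30) == 689, matching test_calc_pbn_mode() */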
@@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void)
{
int ret;
ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689)
+ if (ret != 689) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 154000, 30, 689, ret);
return -EINVAL;
+ }
ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047)
+ if (ret != 1047) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 234000, 30, 1047, ret);
return -EINVAL;
+ }
+ ret = drm_dp_calc_pbn_mode(297000, 24);
+ if (ret != 1063) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 297000, 24, 1063, ret);
+ return -EINVAL;
+ }
return 0;
}
@@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+ kfree(port);
+}
+
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
+ kref_init(&port->kref);
+ INIT_LIST_HEAD(&port->next);
+
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
- if (!port->input && port->vcpi.vcpi > 0)
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- kfree(port);
+ if (!port->input && port->vcpi.vcpi > 0) {
+ if (mgr->mst_state) {
+ drm_dp_mst_reset_vcpi_slots(mgr, port);
+ drm_dp_update_payload_part1(mgr);
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ }
+ }
+
+ kref_put(&port->kref, drm_dp_free_mst_port);
send_hotplug = true;
}
if (send_hotplug)
@@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
mgr->conn_base_id = conn_base_id;
+ if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
+ max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
+ return -EINVAL;
mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
if (!mgr->payloads)
return -ENOMEM;
@@ -2854,7 +2937,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
- test_calc_pbn_mode();
+ if (test_calc_pbn_mode() < 0)
+ DRM_ERROR("MST PBN self-test failed\n");
+
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
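The new bounds check protects the payload/VCPI bitmasks: payload ID 0 is reserved by the set_bit(0, &mgr->payload_mask) call in this function, so max_payloads + 1 bits have to fit in the masks. Assuming the masks are unsigned long (as the sizeof test suggests), a small illustration of the limit:

/* Illustration only: on a 64-bit kernel this accepts max_payloads up
 * to 63, since ID 0 is reserved and each payload needs one bit.
 */
static bool max_payloads_fits(int max_payloads, size_t mask_bytes)
{
	return max_payloads + 1 <= (int)(mask_bytes * 8);
}
/* max_payloads_fits(63, sizeof(unsigned long)) -> true  on 64-bit
 * max_payloads_fits(64, sizeof(unsigned long)) -> false */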
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
index 9e585d51fb78..e881482b5971 100644
--- a/drivers/gpu/drm/etnaviv/common.xml.h
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
-- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
+- common.xml ( 18379 bytes, from 2015-12-12 09:02:53)
Copyright (C) 2015
*/
@@ -30,15 +30,19 @@ Copyright (C) 2015
#define ENDIAN_MODE_NO_SWAP 0x00000000
#define ENDIAN_MODE_SWAP_16 0x00000001
#define ENDIAN_MODE_SWAP_32 0x00000002
+#define chipModel_GC200 0x00000200
#define chipModel_GC300 0x00000300
#define chipModel_GC320 0x00000320
+#define chipModel_GC328 0x00000328
#define chipModel_GC350 0x00000350
#define chipModel_GC355 0x00000355
#define chipModel_GC400 0x00000400
#define chipModel_GC410 0x00000410
#define chipModel_GC420 0x00000420
+#define chipModel_GC428 0x00000428
#define chipModel_GC450 0x00000450
#define chipModel_GC500 0x00000500
+#define chipModel_GC520 0x00000520
#define chipModel_GC530 0x00000530
#define chipModel_GC600 0x00000600
#define chipModel_GC700 0x00000700
@@ -46,9 +50,16 @@ Copyright (C) 2015
#define chipModel_GC860 0x00000860
#define chipModel_GC880 0x00000880
#define chipModel_GC1000 0x00001000
+#define chipModel_GC1500 0x00001500
#define chipModel_GC2000 0x00002000
#define chipModel_GC2100 0x00002100
+#define chipModel_GC2200 0x00002200
+#define chipModel_GC2500 0x00002500
+#define chipModel_GC3000 0x00003000
#define chipModel_GC4000 0x00004000
+#define chipModel_GC5000 0x00005000
+#define chipModel_GC5200 0x00005200
+#define chipModel_GC6400 0x00006400
#define RGBA_BITS_R 0x00000001
#define RGBA_BITS_G 0x00000002
#define RGBA_BITS_B 0x00000004
@@ -160,7 +171,7 @@ Copyright (C) 2015
#define chipMinorFeatures2_UNK8 0x00000100
#define chipMinorFeatures2_UNK9 0x00000200
#define chipMinorFeatures2_UNK10 0x00000400
-#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800
+#define chipMinorFeatures2_HALTI1 0x00000800
#define chipMinorFeatures2_UNK12 0x00001000
#define chipMinorFeatures2_UNK13 0x00002000
#define chipMinorFeatures2_UNK14 0x00004000
@@ -189,7 +200,7 @@ Copyright (C) 2015
#define chipMinorFeatures3_UNK5 0x00000020
#define chipMinorFeatures3_UNK6 0x00000040
#define chipMinorFeatures3_UNK7 0x00000080
-#define chipMinorFeatures3_UNK8 0x00000100
+#define chipMinorFeatures3_FAST_MSAA 0x00000100
#define chipMinorFeatures3_UNK9 0x00000200
#define chipMinorFeatures3_BUG_FIXES10 0x00000400
#define chipMinorFeatures3_UNK11 0x00000800
@@ -199,7 +210,7 @@ Copyright (C) 2015
#define chipMinorFeatures3_UNK15 0x00008000
#define chipMinorFeatures3_UNK16 0x00010000
#define chipMinorFeatures3_UNK17 0x00020000
-#define chipMinorFeatures3_UNK18 0x00040000
+#define chipMinorFeatures3_ACE 0x00040000
#define chipMinorFeatures3_UNK19 0x00080000
#define chipMinorFeatures3_UNK20 0x00100000
#define chipMinorFeatures3_UNK21 0x00200000
@@ -207,7 +218,7 @@ Copyright (C) 2015
#define chipMinorFeatures3_UNK23 0x00800000
#define chipMinorFeatures3_UNK24 0x01000000
#define chipMinorFeatures3_UNK25 0x02000000
-#define chipMinorFeatures3_UNK26 0x04000000
+#define chipMinorFeatures3_NEW_HZ 0x04000000
#define chipMinorFeatures3_UNK27 0x08000000
#define chipMinorFeatures3_UNK28 0x10000000
#define chipMinorFeatures3_UNK29 0x20000000
@@ -229,9 +240,9 @@ Copyright (C) 2015
#define chipMinorFeatures4_UNK13 0x00002000
#define chipMinorFeatures4_UNK14 0x00004000
#define chipMinorFeatures4_UNK15 0x00008000
-#define chipMinorFeatures4_UNK16 0x00010000
+#define chipMinorFeatures4_HALTI2 0x00010000
#define chipMinorFeatures4_UNK17 0x00020000
-#define chipMinorFeatures4_UNK18 0x00040000
+#define chipMinorFeatures4_SMALL_MSAA 0x00040000
#define chipMinorFeatures4_UNK19 0x00080000
#define chipMinorFeatures4_UNK20 0x00100000
#define chipMinorFeatures4_UNK21 0x00200000
@@ -245,5 +256,37 @@ Copyright (C) 2015
#define chipMinorFeatures4_UNK29 0x20000000
#define chipMinorFeatures4_UNK30 0x40000000
#define chipMinorFeatures4_UNK31 0x80000000
+#define chipMinorFeatures5_UNK0 0x00000001
+#define chipMinorFeatures5_UNK1 0x00000002
+#define chipMinorFeatures5_UNK2 0x00000004
+#define chipMinorFeatures5_UNK3 0x00000008
+#define chipMinorFeatures5_UNK4 0x00000010
+#define chipMinorFeatures5_UNK5 0x00000020
+#define chipMinorFeatures5_UNK6 0x00000040
+#define chipMinorFeatures5_UNK7 0x00000080
+#define chipMinorFeatures5_UNK8 0x00000100
+#define chipMinorFeatures5_HALTI3 0x00000200
+#define chipMinorFeatures5_UNK10 0x00000400
+#define chipMinorFeatures5_UNK11 0x00000800
+#define chipMinorFeatures5_UNK12 0x00001000
+#define chipMinorFeatures5_UNK13 0x00002000
+#define chipMinorFeatures5_UNK14 0x00004000
+#define chipMinorFeatures5_UNK15 0x00008000
+#define chipMinorFeatures5_UNK16 0x00010000
+#define chipMinorFeatures5_UNK17 0x00020000
+#define chipMinorFeatures5_UNK18 0x00040000
+#define chipMinorFeatures5_UNK19 0x00080000
+#define chipMinorFeatures5_UNK20 0x00100000
+#define chipMinorFeatures5_UNK21 0x00200000
+#define chipMinorFeatures5_UNK22 0x00400000
+#define chipMinorFeatures5_UNK23 0x00800000
+#define chipMinorFeatures5_UNK24 0x01000000
+#define chipMinorFeatures5_UNK25 0x02000000
+#define chipMinorFeatures5_UNK26 0x04000000
+#define chipMinorFeatures5_UNK27 0x08000000
+#define chipMinorFeatures5_UNK28 0x10000000
+#define chipMinorFeatures5_UNK29 0x20000000
+#define chipMinorFeatures5_UNK30 0x40000000
+#define chipMinorFeatures5_UNK31 0x80000000
#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 5c89ebb52fd2..e8858985f01e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -668,7 +668,6 @@ static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
.remove = etnaviv_pdev_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "etnaviv",
.of_match_table = dt_match,
},
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d6bd438bd5be..1cd6046e76b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -85,7 +85,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
-void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
+void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index bf8fa859e8be..4a29eeadbf1e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -201,7 +201,9 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
obj = vram->object;
+ mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
+ mutex_unlock(&obj->lock);
if (pages) {
int j;
@@ -213,8 +215,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
iter.hdr->iova = cpu_to_le64(vram->iova);
- vaddr = etnaviv_gem_vaddr(&obj->base);
- if (vaddr && !IS_ERR(vaddr))
+ vaddr = etnaviv_gem_vmap(&obj->base);
+ if (vaddr)
memcpy(iter.data, vaddr, obj->base.size);
etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 9f77c3b94cc6..4b519e4309b2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -353,25 +353,39 @@ void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
drm_gem_object_unreference_unlocked(obj);
}
-void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
+void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- mutex_lock(&etnaviv_obj->lock);
- if (!etnaviv_obj->vaddr) {
- struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
-
- if (IS_ERR(pages))
- return ERR_CAST(pages);
+ if (etnaviv_obj->vaddr)
+ return etnaviv_obj->vaddr;
- etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
- }
+ mutex_lock(&etnaviv_obj->lock);
+ /*
+ * Need to check again, as we might have raced with another thread
+ * while waiting for the mutex.
+ */
+ if (!etnaviv_obj->vaddr)
+ etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
return etnaviv_obj->vaddr;
}
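etnaviv_gem_vmap() now follows the check/lock/re-check pattern spelled out in the comment above. A generic sketch of the same shape — my_obj and do_vmap() are placeholders, and the unlocked fast path presumes vaddr only ever changes from NULL to a valid mapping while the object is alive:

#include <linux/mutex.h>

struct my_obj {
	struct mutex lock;
	void *vaddr;
};

void *do_vmap(struct my_obj *obj);	/* expensive, runs at most once */

void *my_obj_vmap(struct my_obj *obj)
{
	if (obj->vaddr)			/* fast path, no lock taken */
		return obj->vaddr;

	mutex_lock(&obj->lock);
	if (!obj->vaddr)		/* re-check: we may have raced */
		obj->vaddr = do_vmap(obj);
	mutex_unlock(&obj->lock);

	return obj->vaddr;
}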
+static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+{
+ struct page **pages;
+
+ lockdep_assert_held(&obj->lock);
+
+ pages = etnaviv_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return NULL;
+
+ return vmap(pages, obj->base.size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
if (op & ETNA_PREP_READ)
@@ -522,6 +536,7 @@ static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
.get_pages = etnaviv_gem_shmem_get_pages,
.release = etnaviv_gem_shmem_release,
+ .vmap = etnaviv_gem_vmap_impl,
};
void etnaviv_gem_free_object(struct drm_gem_object *obj)
@@ -866,6 +881,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
.get_pages = etnaviv_gem_userptr_get_pages,
.release = etnaviv_gem_userptr_release,
+ .vmap = etnaviv_gem_vmap_impl,
};
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index a300b4b3d545..ab5df8147a5f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -78,6 +78,7 @@ struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
struct etnaviv_gem_ops {
int (*get_pages)(struct etnaviv_gem_object *);
void (*release)(struct etnaviv_gem_object *);
+ void *(*vmap)(struct etnaviv_gem_object *);
};
static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index e94db4f95770..4e67395f5fa1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -31,7 +31,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
{
- return etnaviv_gem_vaddr(obj);
+ return etnaviv_gem_vmap(obj);
}
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -77,9 +77,17 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
}
+static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
+{
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+}
+
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
/* .get_pages should never be called */
.release = etnaviv_gem_prime_release,
+ .vmap = etnaviv_gem_prime_vmap_impl,
};
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 056a72e6ed26..a33162cf4f4c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -72,6 +72,14 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.minor_features3;
break;
+ case ETNAVIV_PARAM_GPU_FEATURES_5:
+ *value = gpu->identity.minor_features4;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_6:
+ *value = gpu->identity.minor_features5;
+ break;
+
case ETNAVIV_PARAM_GPU_STREAM_COUNT:
*value = gpu->identity.stream_count;
break;
@@ -112,6 +120,10 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.num_constants;
break;
+ case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
+ *value = gpu->identity.varyings_count;
+ break;
+
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -120,46 +132,56 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
return 0;
}
+
+#define etnaviv_is_model_rev(gpu, mod, rev) \
+ ((gpu)->identity.model == chipModel_##mod && \
+ (gpu)->identity.revision == rev)
+#define etnaviv_field(val, field) \
+ (((val) & field##__MASK) >> field##__SHIFT)
+
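The two macros above just stitch together the generated __MASK/__SHIFT pairs and the chipModel_* constants from the xml headers. For example:

/* etnaviv_field(specs[3], VIVS_HI_CHIP_SPECS_4_STREAM_COUNT) expands to
 *   ((specs[3] & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
 *		>> VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT)
 * i.e. ((specs[3] & 0x0001f000) >> 12), per state_hi.xml.h.
 *
 * etnaviv_is_model_rev(gpu, GC2000, 0x5108) expands to
 *   ((gpu)->identity.model == chipModel_GC2000 &&
 *    (gpu)->identity.revision == 0x5108)
 */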
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
if (gpu->identity.minor_features0 &
chipMinorFeatures0_MORE_MINOR_FEATURES) {
- u32 specs[2];
+ u32 specs[4];
+ unsigned int streams;
specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
-
- gpu->identity.stream_count =
- (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
- >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
- gpu->identity.register_max =
- (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
- >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
- gpu->identity.thread_count =
- (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
- >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
- gpu->identity.vertex_cache_size =
- (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
- >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
- gpu->identity.shader_core_count =
- (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
- >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
- gpu->identity.pixel_pipes =
- (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
- >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
+ specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
+ specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
+
+ gpu->identity.stream_count = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_STREAM_COUNT);
+ gpu->identity.register_max = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_REGISTER_MAX);
+ gpu->identity.thread_count = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_THREAD_COUNT);
+ gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
+ gpu->identity.shader_core_count = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
+ gpu->identity.pixel_pipes = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
gpu->identity.vertex_output_buffer_size =
- (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
- >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
-
- gpu->identity.buffer_size =
- (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
- >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
- gpu->identity.instruction_count =
- (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
- >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
- gpu->identity.num_constants =
- (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
- >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
+ etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
+
+ gpu->identity.buffer_size = etnaviv_field(specs[1],
+ VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
+ gpu->identity.instruction_count = etnaviv_field(specs[1],
+ VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
+ gpu->identity.num_constants = etnaviv_field(specs[1],
+ VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
+
+ gpu->identity.varyings_count = etnaviv_field(specs[2],
+ VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
+
+ /* This overrides the value from older register if non-zero */
+ streams = etnaviv_field(specs[3],
+ VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
+ if (streams)
+ gpu->identity.stream_count = streams;
}
/* Fill in the stream count if not specified */
@@ -173,7 +195,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
/* Convert the register max value */
if (gpu->identity.register_max)
gpu->identity.register_max = 1 << gpu->identity.register_max;
- else if (gpu->identity.model == 0x0400)
+ else if (gpu->identity.model == chipModel_GC400)
gpu->identity.register_max = 32;
else
gpu->identity.register_max = 64;
@@ -181,10 +203,10 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
/* Convert thread count */
if (gpu->identity.thread_count)
gpu->identity.thread_count = 1 << gpu->identity.thread_count;
- else if (gpu->identity.model == 0x0400)
+ else if (gpu->identity.model == chipModel_GC400)
gpu->identity.thread_count = 64;
- else if (gpu->identity.model == 0x0500 ||
- gpu->identity.model == 0x0530)
+ else if (gpu->identity.model == chipModel_GC500 ||
+ gpu->identity.model == chipModel_GC530)
gpu->identity.thread_count = 128;
else
gpu->identity.thread_count = 256;
@@ -206,7 +228,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
if (gpu->identity.vertex_output_buffer_size) {
gpu->identity.vertex_output_buffer_size =
1 << gpu->identity.vertex_output_buffer_size;
- } else if (gpu->identity.model == 0x0400) {
+ } else if (gpu->identity.model == chipModel_GC400) {
if (gpu->identity.revision < 0x4000)
gpu->identity.vertex_output_buffer_size = 512;
else if (gpu->identity.revision < 0x4200)
@@ -219,9 +241,8 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
switch (gpu->identity.instruction_count) {
case 0:
- if ((gpu->identity.model == 0x2000 &&
- gpu->identity.revision == 0x5108) ||
- gpu->identity.model == 0x880)
+ if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+ gpu->identity.model == chipModel_GC880)
gpu->identity.instruction_count = 512;
else
gpu->identity.instruction_count = 256;
@@ -242,6 +263,30 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
if (gpu->identity.num_constants == 0)
gpu->identity.num_constants = 168;
+
+ if (gpu->identity.varyings_count == 0) {
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
+ gpu->identity.varyings_count = 12;
+ else
+ gpu->identity.varyings_count = 8;
+ }
+
+ /*
+ * For some cores, two varyings are consumed for position, so the
+ * maximum varying count needs to be reduced by one.
+ */
+ if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+ etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
+ etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
+ etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
+ etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+ etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
+ etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
+ etnaviv_is_model_rev(gpu, GC880, 0x5106))
+ gpu->identity.varyings_count -= 1;
}
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
@@ -251,12 +296,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
/* Special case for older graphic cores. */
- if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
- >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) {
- gpu->identity.model = 0x500; /* gc500 */
- gpu->identity.revision =
- (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
- >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
+ if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
+ gpu->identity.model = chipModel_GC500;
+ gpu->identity.revision = etnaviv_field(chipIdentity,
+ VIVS_HI_CHIP_IDENTITY_REVISION);
} else {
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
@@ -269,13 +312,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
* same. Only for GC400 family.
*/
if ((gpu->identity.model & 0xff00) == 0x0400 &&
- gpu->identity.model != 0x0420) {
+ gpu->identity.model != chipModel_GC420) {
gpu->identity.model = gpu->identity.model & 0x0400;
}
/* Another special case */
- if (gpu->identity.model == 0x300 &&
- gpu->identity.revision == 0x2201) {
+ if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
@@ -295,11 +337,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
/* Disable fast clear on GC700. */
- if (gpu->identity.model == 0x700)
+ if (gpu->identity.model == chipModel_GC700)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
- if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
- (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
+ if ((gpu->identity.model == chipModel_GC500 &&
+ gpu->identity.revision < 2) ||
+ (gpu->identity.model == chipModel_GC300 &&
+ gpu->identity.revision < 0x2000)) {
/*
* GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
@@ -309,6 +353,8 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.minor_features1 = 0;
gpu->identity.minor_features2 = 0;
gpu->identity.minor_features3 = 0;
+ gpu->identity.minor_features4 = 0;
+ gpu->identity.minor_features5 = 0;
} else
gpu->identity.minor_features0 =
gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
@@ -321,6 +367,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
gpu->identity.minor_features3 =
gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+ gpu->identity.minor_features4 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
+ gpu->identity.minor_features5 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
}
/* GC600 idle register reports zero bits where modules aren't present */
@@ -441,10 +491,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
- if (gpu->identity.model == chipModel_GC320 &&
- gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
- (gpu->identity.revision == 0x5007 ||
- gpu->identity.revision == 0x5220)) {
+ if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
+ etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
+ gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
u32 mc_memory_debug;
mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
@@ -466,7 +515,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
VIVS_HI_AXI_CONFIG_ARCACHE(2));
/* GC2000 rev 5108 needs a special bus config */
- if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
+ if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
@@ -511,8 +560,16 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
if (gpu->identity.model == 0) {
dev_err(gpu->dev, "Unknown GPU model\n");
- pm_runtime_put_autosuspend(gpu->dev);
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /* Exclude VG cores with FE2.0 */
+ if (gpu->identity.features & chipFeatures_PIPE_VG &&
+ gpu->identity.features & chipFeatures_FE20) {
+ dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
+ ret = -ENXIO;
+ goto fail;
}
ret = etnaviv_hw_reset(gpu);
@@ -539,10 +596,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
- /* TODO: we will leak here memory - fix it! */
-
gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
if (!gpu->mmu) {
+ iommu_domain_free(iommu);
ret = -ENOMEM;
goto fail;
}
@@ -552,7 +608,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
if (!gpu->buffer) {
ret = -ENOMEM;
dev_err(gpu->dev, "could not create command buffer\n");
- goto fail;
+ goto destroy_iommu;
}
if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
ret = -EINVAL;
@@ -582,6 +638,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_gpu_cmdbuf_free(gpu->buffer);
gpu->buffer = NULL;
+destroy_iommu:
+ etnaviv_iommu_destroy(gpu->mmu);
+ gpu->mmu = NULL;
fail:
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
@@ -642,6 +701,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
gpu->identity.minor_features2);
seq_printf(m, "\t minor_features3: 0x%08x\n",
gpu->identity.minor_features3);
+ seq_printf(m, "\t minor_features4: 0x%08x\n",
+ gpu->identity.minor_features4);
+ seq_printf(m, "\t minor_features5: 0x%08x\n",
+ gpu->identity.minor_features5);
seq_puts(m, "\tspecs\n");
seq_printf(m, "\t stream_count: %d\n",
@@ -664,6 +727,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
gpu->identity.instruction_count);
seq_printf(m, "\t num_constants: %d\n",
gpu->identity.num_constants);
+ seq_printf(m, "\t varyings_count: %d\n",
+ gpu->identity.varyings_count);
seq_printf(m, "\taxi: 0x%08x\n", axi);
seq_printf(m, "\tidle: 0x%08x\n", idle);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index c75d50359ab0..f233ac4c7c1c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -46,6 +46,12 @@ struct etnaviv_chip_identity {
/* Supported minor feature 3 fields. */
u32 minor_features3;
+ /* Supported minor feature 4 fields. */
+ u32 minor_features4;
+
+ /* Supported minor feature 5 fields. */
+ u32 minor_features5;
+
/* Number of streams supported. */
u32 stream_count;
@@ -75,6 +81,9 @@ struct etnaviv_chip_identity {
/* Buffer size */
u32 buffer_size;
+
+ /* Number of varyings */
+ u8 varyings_count;
};
struct etnaviv_event {
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 0064f2640396..6a7de5f1454a 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
-- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
+- common.xml ( 18437 bytes, from 2015-12-12 09:02:53)
Copyright (C) 2015
*/
@@ -182,8 +182,25 @@ Copyright (C) 2015
#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088
+#define VIVS_HI_CHIP_SPECS_3 0x0000008c
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK 0x000001f0
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT 4
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK 0x00000007
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
+
#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
+#define VIVS_HI_CHIP_SPECS_4 0x0000009c
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK 0x0001f000
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT 12
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_5 0x000000a0
+
+#define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8
+
#define VIVS_PM 0x00000000
#define VIVS_PM_POWER_CONTROLS 0x00000100
@@ -206,6 +223,11 @@ Copyright (C) 2015
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH 0x00000008
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA 0x00000010
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE 0x00000020
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA 0x00000040
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080
#define VIVS_PM_PULSE_EATER 0x0000010c
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index b79c316c2ad2..673164b331c8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
+ struct device_node *np = NULL, *endpoint = NULL;
struct exynos_dp_device *dp;
int ret;
@@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dp);
/* This is for the backward compatibility. */
- panel_node = of_parse_phandle(dev->of_node, "panel", 0);
- if (panel_node) {
- dp->panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
+ np = of_parse_phandle(dev->of_node, "panel", 0);
+ if (np) {
+ dp->panel = of_drm_find_panel(np);
+ of_node_put(np);
if (!dp->panel)
return -EPROBE_DEFER;
- } else {
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (endpoint) {
- panel_node = of_graph_get_remote_port_parent(endpoint);
- if (panel_node) {
- dp->panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
- if (!dp->panel)
- return -EPROBE_DEFER;
- } else {
- DRM_ERROR("no port node for panel device.\n");
- return -EINVAL;
- }
- }
- }
-
- if (endpoint)
goto out;
+ }
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (endpoint) {
- bridge_node = of_graph_get_remote_port_parent(endpoint);
- if (bridge_node) {
- dp->ptn_bridge = of_drm_find_bridge(bridge_node);
- of_node_put(bridge_node);
- if (!dp->ptn_bridge)
- return -EPROBE_DEFER;
- } else
- return -EPROBE_DEFER;
+ np = of_graph_get_remote_port_parent(endpoint);
+ if (np) {
+ /* The remote port can be either a panel or a bridge */
+ dp->panel = of_drm_find_panel(np);
+ if (!dp->panel) {
+ dp->ptn_bridge = of_drm_find_bridge(np);
+ if (!dp->ptn_bridge) {
+ of_node_put(np);
+ return -EPROBE_DEFER;
+ }
+ }
+ of_node_put(np);
+ } else {
+ DRM_ERROR("no remote endpoint device node found.\n");
+ return -EINVAL;
+ }
+ } else {
+ DRM_ERROR("no port endpoint subnode found.\n");
+ return -EINVAL;
}
out:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index d84a498ef099..e977a81af2e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_dsi_suspend(struct device *dev)
+static int __maybe_unused exynos_dsi_suspend(struct device *dev)
{
struct drm_encoder *encoder = dev_get_drvdata(dev);
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev)
return 0;
}
-static int exynos_dsi_resume(struct device *dev)
+static int __maybe_unused exynos_dsi_resume(struct device *dev)
{
struct drm_encoder *encoder = dev_get_drvdata(dev);
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1971,6 @@ err_clk:
return ret;
}
-#endif
static const struct dev_pm_ops exynos_dsi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b5fbc1cbf024..0a5a60005f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mixer_suspend(struct device *dev)
+static int __maybe_unused exynos_mixer_suspend(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev)
return 0;
}
-static int exynos_mixer_resume(struct device *dev)
+static int __maybe_unused exynos_mixer_resume(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops exynos_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 533d1e3d4a99..a02112ba1c3d 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
case ADV7511_REG_BKSV(3):
case ADV7511_REG_BKSV(4):
case ADV7511_REG_DDC_STATUS:
+ case ADV7511_REG_EDID_READ_CTRL:
case ADV7511_REG_BSTATUS(0):
case ADV7511_REG_BSTATUS(1):
case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
- * Per spec it is allowed to pulse the HDP signal to indicate that the
+ * Per spec it is allowed to pulse the HPD signal to indicate that the
* EDID information has changed. Some monitors do this when they wakeup
- * from standby or are enabled. When the HDP goes low the adv7511 is
+ * from standby or are enabled. When the HPD goes low the adv7511 is
* reset and the outputs are disabled which might cause the monitor to
- * go to standby again. To avoid this we ignore the HDP pin for the
+ * go to standby again. To avoid this we ignore the HPD pin for the
* first few seconds after enabling the output.
*/
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HDP_SRC_MASK,
- ADV7511_REG_POWER2_HDP_SRC_NONE);
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
/*
* Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
if (ret < 0)
return false;
- if (irq0 & ADV7511_INT0_HDP) {
+ if (irq0 & ADV7511_INT0_HPD) {
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_HDP);
+ ADV7511_INT0_HPD);
return true;
}
@@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
drm_helper_hpd_irq_event(adv7511->encoder->dev);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
}
@@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
- /* Renable HDP sensing */
+ /* Re-enable HPD sensing */
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HDP_SRC_MASK,
- ADV7511_REG_POWER2_HDP_SRC_BOTH);
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_BOTH);
}
adv7511->status = status;
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed538426..38515b30cedf 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -90,7 +90,7 @@
#define ADV7511_CSC_ENABLE BIT(7)
#define ADV7511_CSC_UPDATE_MODE BIT(5)
-#define ADV7511_INT0_HDP BIT(7)
+#define ADV7511_INT0_HPD BIT(7)
#define ADV7511_INT0_VSYNC BIT(5)
#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
#define ADV7511_INT0_EDID_READY BIT(2)
@@ -157,11 +157,11 @@
#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
-#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0
-#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00
-#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40
-#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80
-#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index fcd77b27514d..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,7 +10,6 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
- select STOP_MACHINE
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3ac616d7363b..f357058c74d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev)
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
+ ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+ pch->subsystem_vendor == 0x1af4 &&
+ pch->subsystem_device == 0x1100)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2f00828ccc6e..5feb65725c04 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2946,7 +2946,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
struct i915_vma *vma;
u64 offset;
- intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
+ intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
intel_plane->base.state);
vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -12075,11 +12075,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to 8 on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0 && bpp > 24) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
- bpp);
- pipe_config->pipe_bpp = 24;
+ /* Clamp bpp to default limit on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0) {
+ int type = connector->base.connector_type;
+ int clamp_bpp = 24;
+
+ /* Fall back to 18 bpp when DP sink capability is unknown. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+ type == DRM_MODE_CONNECTOR_eDP)
+ clamp_bpp = 18;
+
+ if (bpp > clamp_bpp) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+ bpp, clamp_bpp);
+ pipe_config->pipe_bpp = clamp_bpp;
+ }
}
}
@@ -13883,11 +13893,12 @@ intel_check_primary_plane(struct drm_plane *plane,
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
bool can_position = false;
- /* use scaler when colorkey is not required */
- if (INTEL_INFO(plane->dev)->gen >= 9 &&
- state->ckey.flags == I915_SET_COLORKEY_NONE) {
- min_scale = 1;
- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ if (INTEL_INFO(plane->dev)->gen >= 9) {
+ /* use scaler when colorkey is not required */
+ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ min_scale = 1;
+ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ }
can_position = true;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3aa614731d7e..f1fa756c5d5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1707,6 +1707,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 339701d7a9a5..40c6aff57256 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
@@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 6bfc46369db1..367a916f364e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -304,18 +304,10 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
DENTIST_DPREFCLK_WDIVIDER_MASK) >>
DENTIST_DPREFCLK_WDIVIDER_SHIFT;
-
- if (div < 128 && div >= 96)
- div -= 64;
- else if (div >= 64)
- div = div / 2 - 16;
- else if (div >= 8)
- div /= 4;
- else
- div = 0;
+ div = radeon_audio_decode_dfs_div(div);
if (div)
- clock = rdev->clock.gpupll_outputfreq * 10 / div;
+ clock = clock * 100 / div;
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 9953356fe263..3cf04a2f44bb 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
+ if (ASIC_IS_DCE41(rdev)) {
+ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+ div = radeon_audio_decode_dfs_div(div);
+
+ if (div)
+ clock = 100 * clock / div;
+ }
+
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4aa5f755572b..13b6029d65cc 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -511,6 +511,11 @@
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
+
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
# define HDMI_KEEPOUT_MODE (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5ae6db98aa4d..78a51b3eda10 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -268,7 +268,7 @@ struct radeon_clock {
uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
- uint32_t gpupll_outputfreq;
+ uint32_t vco_freq;
};
/*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08fc1b5effa8..de9a2ffcf5f7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1106,6 +1106,31 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V2_2 info_22;
};
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+ rdev->clock.vco_freq =
+ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+ }
+}
+
bool radeon_atom_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1257,12 +1282,18 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->mode_info.firmware_flags =
le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
- if (ASIC_IS_DCE8(rdev)) {
- rdev->clock.gpupll_outputfreq =
+ if (ASIC_IS_DCE8(rdev))
+ rdev->clock.vco_freq =
le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
- if (rdev->clock.gpupll_outputfreq == 0)
- rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */
- }
+ else if (ASIC_IS_DCE5(rdev))
+ rdev->clock.vco_freq = rdev->clock.current_dispclk;
+ else if (ASIC_IS_DCE41(rdev))
+ radeon_atombios_get_dentist_vco_freq(rdev);
+ else
+ rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+ if (rdev->clock.vco_freq == 0)
+ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
return true;
}
@@ -1270,14 +1301,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
return false;
}
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 2c02e99b5f95..b214663b370d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
if (!dig || !dig->afmt)
return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
- else
- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
if (radeon_encoder->audio && radeon_encoder->audio->dpms)
radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
}
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+ if (div >= 8 && div < 64)
+ return (div - 8) * 25 + 200;
+ else if (div >= 64 && div < 96)
+ return (div - 64) * 50 + 1600;
+ else if (div >= 96 && div < 128)
+ return (div - 96) * 100 + 3200;
+ else
+ return 0;
+}
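The new helper turns the raw 7-bit DENTIST_DPREFCLK_WDIVIDER field into the DFS divider scaled by 100, which is why the DCE4.1/DCE6 callers now compute clock = clock * 100 / div. A small stand-alone C sketch (illustrative only; decode_dfs_div mirrors the helper above and the reference clock value is a made-up example) walks the encoding and the resulting DTO clock scaling:

#include <stdio.h>

/* Mirror of radeon_audio_decode_dfs_div(): returns the DFS divider
 * scaled by 100, or 0 for out-of-range encodings. */
static unsigned int decode_dfs_div(unsigned int div)
{
	if (div >= 8 && div < 64)
		return (div - 8) * 25 + 200;
	else if (div >= 64 && div < 96)
		return (div - 64) * 50 + 1600;
	else if (div >= 96 && div < 128)
		return (div - 96) * 100 + 3200;
	else
		return 0;
}

int main(void)
{
	unsigned int clock = 360000;	/* hypothetical reference clock value */
	unsigned int raw;

	for (raw = 0; raw < 128; raw += 16) {
		unsigned int div = decode_dfs_div(raw);

		if (div)
			printf("raw=%3u div=%4u (x%.2f) dto clock=%u\n",
			       raw, div, div / 100.0, clock * 100 / div);
		else
			printf("raw=%3u out of range\n", raw);
	}
	return 0;
}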
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 059cc3012062..5c70cceaa4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
void radeon_audio_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b3bb92368ae0..298ea1c453c3 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1670,8 +1670,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- radeon_fbdev_init(rdev);
- drm_kms_helper_poll_init(rdev->ddev);
+ if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+ }
/* do pm late init */
ret = radeon_pm_late_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3dcc5733ff69..e26c963f2e93 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -663,6 +663,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
args->operation = RADEON_VA_RESULT_ERROR;
+ radeon_bo_unreserve(rbo);
drm_gem_object_unreference_unlocked(gobj);
return -ENOENT;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d45633d28c..fb6ad143873f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 07a0d378e122..a01efe39a820 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
return -EINVAL;
}
- for (i = 0; i < sign->num; ++i) {
- if (sign->val[i].chip_id == chip_id)
+ for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
break;
}
- if (i == sign->num)
+ if (i == le32_to_cpu(sign->num))
return -EINVAL;
data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
data[1] = sign->val[i].nonce[1];
data[2] = sign->val[i].nonce[2];
data[3] = sign->val[i].nonce[3];
- data[4] = sign->len + 64;
+ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
memset(&data[5], 0, 44);
memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
- data += data[4] / 4;
+ data += le32_to_cpu(data[4]) / 4;
data[0] = sign->val[i].sigval[0];
data[1] = sign->val[i].sigval[1];
data[2] = sign->val[i].sigval[2];
data[3] = sign->val[i].sigval[3];
- rdev->vce.keyselect = sign->val[i].keyselect;
+ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
return 0;
}
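The sign->num, chip_id, len and keyselect fields come straight out of the firmware image, which stores them little-endian regardless of the host CPU, hence the le32_to_cpu() wrapping added above. A stand-alone sketch (the byte values are made up; le32_decode stands in for le32_to_cpu) shows the decode that gives the same result on little- and big-endian hosts:

#include <stdio.h>
#include <stdint.h>

/* Firmware images store these fields little-endian regardless of the host;
 * this manual decode stands in for le32_to_cpu(). */
static uint32_t le32_decode(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Made-up bytes of a 'num' field as they sit in the firmware blob: value 3. */
	const uint8_t raw_num[4] = { 0x03, 0x00, 0x00, 0x00 };

	/* Reading this as a native uint32_t would yield 0x03000000 on a
	 * big-endian host; the explicit decode yields 3 everywhere. */
	printf("sign->num = %u\n", (unsigned)le32_decode(raw_num));
	return 0;
}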
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index d1dc0f7b01db..f6a809afceec 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -2,11 +2,11 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
- rockchip_drm_gem.o
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
+ rockchip_drm_gem.o rockchip_drm_vop.o
+rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
-obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \
- rockchip_vop_reg.o
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 7bfe243c6173..f8f8f29fb7c3 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -461,10 +461,11 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi)
{
- unsigned int bpp, i, pre;
+ unsigned int i, pre;
unsigned long mpclk, pllref, tmp;
unsigned int m = 1, n = 1, target_mbps = 1000;
unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
+ int bpp;
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (bpp < 0) {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8397d1b62ef9..a0d51ccb6ea4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -55,14 +55,12 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
return arm_iommu_attach_device(dev, mapping);
}
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev)
{
arm_iommu_detach_device(dev);
}
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
const struct rockchip_crtc_funcs *crtc_funcs)
@@ -77,7 +75,6 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
return 0;
}
-EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
{
@@ -89,7 +86,6 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
priv->crtc_funcs[pipe] = NULL;
}
-EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
int pipe)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index f7844883cb76..3b8f652698f8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -39,7 +39,6 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
return rk_fb->obj[plane];
}
-EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
{
@@ -177,8 +176,23 @@ static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
crtc_funcs->wait_for_update(crtc);
}
+/*
+ * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
+ * have hardware counters for neither vblanks nor scanlines, which results in
+ * a race where:
+ * | <-- HW vsync irq and reg take effect
+ * plane_commit --> |
+ * get_vblank and wait --> |
+ * | <-- handle_vblank, vblank->count + 1
+ * cleanup_fb --> |
+ * iommu crash --> |
+ * | <-- HW vsync irq and reg take effect
+ *
+ * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
+ * of waiting for vblank_count to change.
+ */
static void
-rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
+rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
@@ -194,6 +208,10 @@ rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
if (!crtc->state->active)
continue;
+ if (!drm_atomic_helper_framebuffer_changed(dev,
+ old_state, crtc))
+ continue;
+
ret = drm_crtc_vblank_get(crtc);
if (ret != 0)
continue;
@@ -241,7 +259,7 @@ rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
drm_atomic_helper_commit_planes(dev, state, true);
- rockchip_atomic_wait_for_complete(state);
+ rockchip_atomic_wait_for_complete(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
index 50432e9b5b37..73718c5f5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -15,7 +15,18 @@
#ifndef _ROCKCHIP_DRM_FBDEV_H
#define _ROCKCHIP_DRM_FBDEV_H
+#ifdef CONFIG_DRM_FBDEV_EMULATION
int rockchip_drm_fbdev_init(struct drm_device *dev);
void rockchip_drm_fbdev_fini(struct drm_device *dev);
+#else
+static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+#endif
#endif /* _ROCKCHIP_DRM_FBDEV_H */
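With these stubs, callers such as rockchip_drm_drv.c can invoke rockchip_drm_fbdev_init()/fini() unconditionally; when CONFIG_DRM_FBDEV_EMULATION is off the calls collapse to no-ops at compile time. A minimal stand-alone sketch of the same pattern (the struct, printf bodies and config toggle are illustrative, not the driver's real code):

#include <stdio.h>

struct drm_device { const char *name; };

/* Flip this define to model CONFIG_DRM_FBDEV_EMULATION=y; the caller in
 * main() stays identical either way, which is what the stub header buys us. */
/* #define CONFIG_DRM_FBDEV_EMULATION */

#ifdef CONFIG_DRM_FBDEV_EMULATION
static int rockchip_drm_fbdev_init(struct drm_device *dev)
{
	printf("setting up fbdev emulation for %s\n", dev->name);
	return 0;
}
static void rockchip_drm_fbdev_fini(struct drm_device *dev)
{
	printf("tearing down fbdev emulation for %s\n", dev->name);
}
#else
/* Compiled-out variants: no-op stubs so callers need no #ifdef. */
static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
{
	(void)dev;
	return 0;
}
static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
{
	(void)dev;
}
#endif

int main(void)
{
	struct drm_device dev = { "rockchip-drm" };

	if (rockchip_drm_fbdev_init(&dev))
		return 1;
	rockchip_drm_fbdev_fini(&dev);
	return 0;
}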
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index d908321b94ce..18e07338c6e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -234,13 +234,8 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
/*
* align to 64 bytes since Mali requires it.
*/
- min_pitch = ALIGN(min_pitch, 64);
-
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
-
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+ args->pitch = ALIGN(min_pitch, 64);
+ args->size = args->pitch * args->height;
rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
&args->handle);
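The dumb-buffer path now derives both pitch and size from the 64-byte-aligned minimum pitch instead of honouring whatever the caller passed in. A worked example of that arithmetic (the mode and ALIGN_UP macro are illustrative; the kernel uses its own ALIGN()):

#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of a (a power of two in practice). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/* Hypothetical 1366x768 XRGB8888 dumb-buffer request. */
	uint32_t width = 1366, height = 768, bpp = 32;
	uint32_t min_pitch = width * ((bpp + 7) / 8);	/* 5464 bytes */
	uint32_t pitch = ALIGN_UP(min_pitch, 64);	/* 5504: Mali wants 64-byte alignment */
	uint64_t size = (uint64_t)pitch * height;

	printf("min_pitch=%u pitch=%u size=%llu\n",
	       min_pitch, pitch, (unsigned long long)size);
	return 0;
}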
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 46c2a8dfd8aa..fd370548d7d7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -43,8 +43,8 @@
#define REG_SET(x, base, reg, v, mode) \
__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
-#define REG_SET_MASK(x, base, reg, v, mode) \
- __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+#define REG_SET_MASK(x, base, reg, mask, v, mode) \
+ __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
#define VOP_WIN_SET(x, win, name, v) \
REG_SET(x, win->base, win->phy->name, v, RELAXED)
@@ -58,16 +58,18 @@
#define VOP_INTR_GET(vop, name) \
vop_read_reg(vop, 0, &vop->data->ctrl->name)
-#define VOP_INTR_SET(vop, name, v) \
- REG_SET(vop, 0, vop->data->intr->name, v, NORMAL)
+#define VOP_INTR_SET(vop, name, mask, v) \
+ REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
do { \
- int i, reg = 0; \
+ int i, reg = 0, mask = 0; \
for (i = 0; i < vop->data->intr->nintrs; i++) { \
- if (vop->data->intr->intrs[i] & type) \
+ if (vop->data->intr->intrs[i] & type) { \
reg |= (v) << i; \
+ mask |= 1 << i; \
+ } \
} \
- VOP_INTR_SET(vop, name, reg); \
+ VOP_INTR_SET(vop, name, mask, reg); \
} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
vop_get_intr_type(vop, &vop->data->intr->name, type)
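The reworked VOP_INTR_SET_TYPE builds a write mask alongside the value, so only the register bits belonging to the selected interrupt types are touched. A stand-alone sketch of that reg/mask construction (the interrupt table and bit names are hypothetical, not the VOP's real layout):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical interrupt-type table in the spirit of vop->data->intr->intrs[];
 * bit i of the hardware register corresponds to intrs[i]. */
#define FS_INTR		(1 << 0)
#define LINE_FLAG_INTR	(1 << 1)
#define BUS_ERROR_INTR	(1 << 2)

static const uint32_t intrs[] = { FS_INTR, LINE_FLAG_INTR, BUS_ERROR_INTR };

/* Mirrors the loop in VOP_INTR_SET_TYPE: only bits matching 'type' end up in
 * the mask, so unrelated interrupt enables are left untouched. */
static void build_reg_and_mask(uint32_t type, int v, uint32_t *reg, uint32_t *mask)
{
	unsigned int i;

	*reg = 0;
	*mask = 0;
	for (i = 0; i < sizeof(intrs) / sizeof(intrs[0]); i++) {
		if (intrs[i] & type) {
			*reg |= (uint32_t)v << i;
			*mask |= 1u << i;
		}
	}
}

int main(void)
{
	uint32_t reg, mask;

	build_reg_and_mask(FS_INTR | BUS_ERROR_INTR, 1, &reg, &mask);
	printf("reg=0x%x mask=0x%x\n", reg, mask);	/* reg=0x5 mask=0x5 */
	return 0;
}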
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 424d515ffcda..314ff71db978 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -144,19 +144,16 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
}
#endif /* CONFIG_DEBUG_FS */
-/*
- * Asks the firmware to turn on power to the V3D engine.
- *
- * This may be doable with just the clocks interface, though this
- * packet does some other register setup from the firmware, too.
- */
int
vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
{
- if (on)
- return pm_generic_poweroff(&vc4->v3d->pdev->dev);
- else
- return pm_generic_resume(&vc4->v3d->pdev->dev);
+ /* XXX: This interface is needed for GPU reset, and the way to
+ * do it is to turn our power domain off and back on. We
+ * can't just reset from within the driver, because the reset
+ * bits are in the power domain's register area, and get set
+ * during the poweron process.
+ */
+ return 0;
}
static void vc4_v3d_init_hw(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c49812b80dd0..24fb348a44e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
#include <linux/module.h>
+#include <linux/console.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ return -EINVAL;
+#endif
+
ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c8487894b312..c43318d3416e 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
{
/*
+ * CPU fan speed going up and down on Dell Studio XPS 8000
+ * for unknown reasons.
+ */
+ .ident = "Dell Studio XPS 8000",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
+ },
+ },
+ {
+ /*
* CPU fan speed going up and down on Dell Studio XPS 8100
* for unknown reasons.
*/
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index f77eb971ce95..4f695d8fcafa 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -90,7 +90,15 @@ static ssize_t show_power(struct device *dev,
pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
REG_TDP_LIMIT3, &val);
- tdp_limit = val >> 16;
+ /*
+ * On Carrizo and later platforms, ApmTdpLimit bit field
+ * is extended to 16:31 from 16:28.
+ */
+ if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
+ tdp_limit = val >> 16;
+ else
+ tdp_limit = (val >> 16) & 0x1fff;
+
curr_pwr_watts = ((u64)(tdp_limit +
data->base_tdp)) << running_avg_range;
curr_pwr_watts -= running_avg_capture;
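The comment above captures the ApmTdpLimit width change; a worked example (the register value is made up) shows how the same raw REG_TDP_LIMIT3 word decodes on pre-Carrizo parts (13-bit field, bits 16:28) versus Carrizo and later (16-bit field, bits 16:31):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical REG_TDP_LIMIT3 value; ApmTdpLimit lives in the upper bits. */
	uint32_t val = 0x8123A5C0;
	int carrizo_or_later;	/* family 15h, model >= 0x60 */

	for (carrizo_or_later = 0; carrizo_or_later <= 1; carrizo_or_later++) {
		uint32_t tdp_limit;

		if (carrizo_or_later)
			tdp_limit = val >> 16;			/* bits 16:31, 16-bit field */
		else
			tdp_limit = (val >> 16) & 0x1fff;	/* bits 16:28, 13-bit field */

		printf("%s: tdp_limit=0x%x\n",
		       carrizo_or_later ? "Carrizo+" : "pre-Carrizo",
		       (unsigned)tdp_limit);
	}
	return 0;
}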
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 52f708bcf77f..d50c701b19d6 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
hwlock = radix_tree_deref_slot(slot);
if (unlikely(!hwlock))
continue;
+ if (radix_tree_is_indirect_ptr(hwlock)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (hwlock->bank->dev->of_node == args.np) {
ret = 0;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index ba9732c236c5..10fbd6d841e0 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -874,7 +874,8 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
i2c_set_adapdata(adap, dev);
i2c_dw_disable_int(dev);
- r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
+ r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
+ IRQF_SHARED | IRQF_COND_SUSPEND,
dev_name(dev->dev), dev);
if (r) {
dev_err(dev->dev, "failure requesting irq %i: %d\n",
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e04598595073..93f2895383ee 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -137,10 +137,11 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
};
/* SB800 globals */
+static DEFINE_MUTEX(piix4_mutex_sb800);
static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
- "SDA0", "SDA2", "SDA3", "SDA4"
+ " port 0", " port 2", " port 3", " port 4"
};
-static const char *piix4_aux_port_name_sb800 = "SDA1";
+static const char *piix4_aux_port_name_sb800 = " port 1";
struct i2c_piix4_adapdata {
unsigned short smba;
@@ -148,7 +149,6 @@ struct i2c_piix4_adapdata {
/* SB800 */
bool sb800_main;
unsigned short port;
- struct mutex *mutex;
};
static int piix4_setup(struct pci_dev *PIIX4_dev,
@@ -275,10 +275,12 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
else
smb_en = (aux) ? 0x28 : 0x2c;
+ mutex_lock(&piix4_mutex_sb800);
outb_p(smb_en, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ mutex_unlock(&piix4_mutex_sb800);
if (!smb_en) {
smb_en_status = smba_en_lo & 0x10;
@@ -559,7 +561,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 port;
int retval;
- mutex_lock(adapdata->mutex);
+ mutex_lock(&piix4_mutex_sb800);
outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -574,7 +576,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
- mutex_unlock(adapdata->mutex);
+ mutex_unlock(&piix4_mutex_sb800);
return retval;
}
@@ -625,6 +627,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
static struct i2c_adapter *piix4_aux_adapter;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ bool sb800_main, unsigned short port,
const char *name, struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
@@ -639,7 +642,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
- adap->algo = &smbus_algorithm;
+ adap->algo = sb800_main ? &piix4_smbus_algorithm_sb800
+ : &smbus_algorithm;
adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
if (adapdata == NULL) {
@@ -649,12 +653,14 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
}
adapdata->smba = smba;
+ adapdata->sb800_main = sb800_main;
+ adapdata->port = port;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
snprintf(adap->name, sizeof(adap->name),
- "SMBus PIIX4 adapter %s at %04x", name, smba);
+ "SMBus PIIX4 adapter%s at %04x", name, smba);
i2c_set_adapdata(adap, adapdata);
@@ -673,30 +679,16 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
{
- struct mutex *mutex;
struct i2c_piix4_adapdata *adapdata;
int port;
int retval;
- mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
- if (mutex == NULL)
- return -ENOMEM;
-
- mutex_init(mutex);
-
for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
- retval = piix4_add_adapter(dev, smba,
+ retval = piix4_add_adapter(dev, smba, true, port,
piix4_main_port_names_sb800[port],
&piix4_main_adapters[port]);
if (retval < 0)
goto error;
-
- piix4_main_adapters[port]->algo = &piix4_smbus_algorithm_sb800;
-
- adapdata = i2c_get_adapdata(piix4_main_adapters[port]);
- adapdata->sb800_main = true;
- adapdata->port = port;
- adapdata->mutex = mutex;
}
return retval;
@@ -714,19 +706,20 @@ error:
}
}
- kfree(mutex);
-
return retval;
}
static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
+ bool is_sb800 = false;
if ((dev->vendor == PCI_VENDOR_ID_ATI &&
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision >= 0x40) ||
dev->vendor == PCI_VENDOR_ID_AMD) {
+ is_sb800 = true;
+
if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
dev_err(&dev->dev,
"SMBus base address index region 0x%x already in use!\n",
@@ -756,7 +749,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return retval;
/* Try to register main SMBus adapter, give up if we can't */
- retval = piix4_add_adapter(dev, retval, "main",
+ retval = piix4_add_adapter(dev, retval, false, 0, "",
&piix4_main_adapters[0]);
if (retval < 0)
return retval;
@@ -783,7 +776,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (retval > 0) {
/* Try to add the aux adapter if it exists,
* piix4_add_adapter will clean up if this fails */
- piix4_add_adapter(dev, retval, piix4_aux_port_name_sb800,
+ piix4_add_adapter(dev, retval, false, 0,
+ is_sb800 ? piix4_aux_port_name_sb800 : "",
&piix4_aux_adapter);
}
@@ -798,10 +792,8 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
i2c_del_adapter(adap);
if (adapdata->port == 0) {
release_region(adapdata->smba, SMBIOSIZE);
- if (adapdata->sb800_main) {
- kfree(adapdata->mutex);
+ if (adapdata->sb800_main)
release_region(SB800_PIIX4_SMB_IDX, 2);
- }
}
kfree(adapdata);
kfree(adap);
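Every SB800 port reaches the controller through the shared SB800_PIIX4_SMB_IDX index/data register pair, so the index write and the following data access must be one atomic unit; the driver now serializes them behind a single file-scope mutex instead of a heap-allocated per-adapter one. A user-space model of that pattern (a pthread mutex and plain arrays stand in for the kernel mutex and the I/O ports):

/* build with: cc -pthread example.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t piix4_mutex_sb800 = PTHREAD_MUTEX_INITIALIZER;
static unsigned char smb_idx;		/* stands in for SB800_PIIX4_SMB_IDX */
static unsigned char regs[256];		/* stands in for the indexed register file */

static unsigned char indexed_read(unsigned char index)
{
	unsigned char val;

	pthread_mutex_lock(&piix4_mutex_sb800);
	smb_idx = index;		/* outb_p(index, SB800_PIIX4_SMB_IDX) */
	val = regs[smb_idx];		/* inb_p(SB800_PIIX4_SMB_IDX + 1) */
	pthread_mutex_unlock(&piix4_mutex_sb800);
	return val;
}

int main(void)
{
	regs[0x2c] = 0x55;
	printf("reg 0x2c = 0x%02x\n", indexed_read(0x2c));
	return 0;
}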
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index edc29b173f6c..833ea9dd4464 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -213,6 +213,7 @@ config STK8312
config STK8BA50
tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
depends on I2C
+ depends on IIO_TRIGGER
help
Say yes here to get support for the Sensortek STK8BA50 3-axis
accelerometer.
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 605ff42c4631..283ded7747a9 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -175,6 +175,7 @@ config DA9150_GPADC
config EXYNOS_ADC
tristate "Exynos ADC driver support"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
+ depends on HAS_IOMEM
help
Core support for the ADC block found in the Samsung EXYNOS series
of SoCs for drivers such as the touchscreen and hwmon to use to share
@@ -207,6 +208,7 @@ config INA2XX_ADC
config IMX7D_ADC
tristate "IMX7D ADC driver"
depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say yes here to build support for IMX7D ADC.
@@ -409,6 +411,7 @@ config TWL6030_GPADC
config VF610_ADC
tristate "Freescale vf610 ADC driver"
depends on OF
+ depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 942320e32753..c1e05532d437 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
goto error_kfifo_free;
indio_dev->setup_ops = setup_ops;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 43d14588448d..b4dde8315210 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
data->client = client;
indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
indio_dev->channels = &mcp4725_channel;
indio_dev->num_channels = 1;
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 1165b1c4f9d6..cfc5a051ab9f 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -117,7 +117,7 @@ static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
return -EIO;
- dht11->timestamp = ktime_get_real_ns();
+ dht11->timestamp = ktime_get_boot_ns();
if (hum_int < 20) { /* DHT22 */
dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
((temp_int & 0x80) ? -100 : 100);
@@ -145,7 +145,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
- dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
+ dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);
@@ -164,7 +164,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
int ret, timeres;
mutex_lock(&dht11->lock);
- if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
+ if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
timeres = ktime_get_resolution_ns();
if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
dev_err(dht11->dev, "timeresolution %dns too low\n",
@@ -279,7 +279,7 @@ static int dht11_probe(struct platform_device *pdev)
return -EINVAL;
}
- dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1;
+ dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->num_edges = -1;
platform_set_drvdata(pdev, iio);
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index cb32b593f1c5..36607d52fee0 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
rx = adis->buffer;
- tx = rx + indio_dev->scan_bytes;
+ tx = rx + scan_count;
spi_message_init(&adis->msg);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 48fbc0bc7e2a..8f8d1370ed8b 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -5,9 +5,9 @@
config INV_MPU6050_IIO
tristate "Invensense MPU6050 devices"
depends on I2C && SYSFS
+ depends on I2C_MUX
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
- select I2C_MUX
help
This driver supports the Invensense MPU6050 devices.
This driver can also support MPU6500 in MPU6050 compatibility mode
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 80fbbfd76faf..734a0042de0c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -349,6 +349,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
void iio_channel_release(struct iio_channel *channel)
{
+ if (!channel)
+ return;
iio_device_put(channel->indio_dev);
kfree(channel);
}
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 60537ec0c923..53201d99a16c 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
.realbits = 32,
.storagebits = 32,
},
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ /* _RAW is here for backward ABI compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
},
};
@@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
s32 temp_val;
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
+ if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
return -EINVAL;
/* we support only illumination (_ALI) so far. */
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 809a961b9a7f..6bf89d8f3741 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
{500000, 2000000}
};
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
int len, int val, int val2)
{
int i, freq;
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index f5ecd6e19f5d..a0d7deeac62f 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
*val = ret >> 6;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = 605;
+ *val = -605;
*val2 = 750000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 93e29fb67fa0..db35e04a0637 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -87,7 +87,7 @@ static int lidar_i2c_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
ret = i2c_transfer(client->adapter, msg, 2);
- return (ret == 2) ? 0 : ret;
+ return (ret == 2) ? 0 : -EIO;
}
static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 19837d270278..2116132568e7 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -322,6 +322,8 @@ int ib_ud_header_init(int payload_bytes,
int immediate_present,
struct ib_ud_header *header)
{
+ size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
+
grh_present = grh_present && !ip_version;
memset(header, 0, sizeof *header);
@@ -353,7 +355,8 @@ int ib_ud_header_init(int payload_bytes,
if (ip_version == 6 || grh_present) {
header->grh.ip_version = 6;
header->grh.payload_length =
- cpu_to_be16((IB_BTH_BYTES +
+ cpu_to_be16((udp_bytes +
+ IB_BTH_BYTES +
IB_DETH_BYTES +
payload_bytes +
4 + /* ICRC */
@@ -362,8 +365,6 @@ int ib_ud_header_init(int payload_bytes,
}
if (ip_version == 4) {
- int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
-
header->ip4.ver = 4; /* version 4 */
header->ip4.hdr_len = 5; /* 5 words */
header->ip4.tot_len =
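For RoCEv2 the UD header gains an 8-byte UDP header, and the GRH payload_length must now count it, just as the IPv4 total length already did. A small sketch of the arithmetic (the payload size is a made-up example; header byte counts follow ib_pack.h, and the driver's immediate-data term is noted but omitted):

#include <stdio.h>

/* Header sizes as used by ib_ud_header_init() (per include/rdma/ib_pack.h). */
#define IB_BTH_BYTES	12
#define IB_DETH_BYTES	8
#define IB_UDP_BYTES	8
#define ICRC_BYTES	4

int main(void)
{
	int payload_bytes = 256;	/* made-up UD payload size */
	int udp_present;

	for (udp_present = 0; udp_present <= 1; udp_present++) {
		int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
		/* grh.payload_length counts everything after the GRH; the driver
		 * also adds 4 bytes when immediate data is present (omitted here). */
		int payload_length = udp_bytes + IB_BTH_BYTES + IB_DETH_BYTES +
				     payload_bytes + ICRC_BYTES;

		printf("udp_present=%d -> grh payload_length=%d\n",
		       udp_present, payload_length);
	}
	return 0;
}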
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ec737e2287fe..03c418ccbc98 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -844,6 +844,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int err;
int i;
size_t reqlen;
+ size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+ max_cqe_version);
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
@@ -854,7 +856,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
ver = 0;
- else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ else if (reqlen >= min_req_v2)
ver = 2;
else
return ERR_PTR(-EINVAL);
@@ -2214,7 +2216,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
dev->ib_dev.uverbs_ex_cmd_mask =
- (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+ (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
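Checking the v2 alloc_ucontext request against offsetof(..., max_cqe_version) rather than sizeof() lets older userspace, which sends a shorter v2 struct without the newly appended fields, keep working. A stand-alone sketch of that minimum-length check (the struct layout here is hypothetical; only the offsetof idea matches the driver):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical layout standing in for mlx5_ib_alloc_ucontext_req_v2: fields
 * appended at the tail must stay optional for old userspace. */
struct alloc_ucontext_req_v2 {
	uint32_t total_num_uuars;
	uint32_t num_low_latency_uuars;
	uint32_t flags;
	uint32_t comp_mask;
	uint8_t  max_cqe_version;	/* first field newer userspace adds */
	uint8_t  reserved[3];
	uint32_t some_future_field;
};

int main(void)
{
	size_t min_req_v2 = offsetof(struct alloc_ucontext_req_v2, max_cqe_version);
	size_t reqlens[] = { 12, min_req_v2, sizeof(struct alloc_ucontext_req_v2) };
	size_t i;

	for (i = 0; i < sizeof(reqlens) / sizeof(reqlens[0]); i++)
		printf("reqlen=%2zu -> %s\n", reqlens[i],
		       reqlens[i] >= min_req_v2 ? "accepted as v2" : "rejected");
	return 0;
}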
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8fb9c27485e1..9116bc3988a6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1036,7 +1036,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode,
- MLX5_GET64(qpc, qpc, end_padding_mode));
+ MLX5_GET(qpc, qpc, end_padding_mode));
MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1615,15 +1615,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
if (pd) {
dev = to_mdev(pd->device);
- } else {
- /* being cautious here */
- if (init_attr->qp_type != IB_QPT_XRC_TGT &&
- init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
- pr_warn("%s: no PD for transport %s\n", __func__,
- ib_qp_type_str(init_attr->qp_type));
- return ERR_PTR(-EINVAL);
- }
- dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
if (!pd->uobject) {
@@ -1634,6 +1625,15 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
}
+ } else {
+ /* being cautious here */
+ if (init_attr->qp_type != IB_QPT_XRC_TGT &&
+ init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
+ pr_warn("%s: no PD for transport %s\n", __func__,
+ ib_qp_type_str(init_attr->qp_type));
+ return ERR_PTR(-EINVAL);
+ }
+ dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
switch (init_attr->qp_type) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 539b0dea8034..e5e223938eec 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2049,7 +2049,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats);
if (alias != dev_data->devid)
- set_dte_entry(dev_data->devid, domain, ats);
+ set_dte_entry(alias, domain, ats);
device_flush_dte(dev_data);
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac7387686ddc..986a53e3eb96 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- if (dev_is_pci(info->dev))
+ if (!dev_is_pci(info->dev))
return;
pdev = to_pci_dev(info->dev);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 8bbcbfe7695c..381ca5a37a7b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -25,6 +25,7 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/dma-mapping.h>
#include <asm/barrier.h>
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 715923d5236c..fb50911b3940 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -159,6 +159,7 @@ config TB10X_IRQC
config TS4800_IRQ
tristate "TS-4800 IRQ controller"
select IRQ_DOMAIN
+ depends on HAS_IOMEM
help
Support for the TS-4800 FPGA IRQ controller
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index b12a5d58546f..37199b9b2cfa 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
priority > AT91_AIC_IRQ_MAX_PRIORITY)
return -EINVAL;
- *val &= AT91_AIC_PRIOR;
+ *val &= ~AT91_AIC_PRIOR;
*val |= priority;
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e23d1d18f9d6..3447549fcc93 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -875,6 +875,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
}
alloc_size = (1 << order) * PAGE_SIZE;
+retry_alloc_baser:
alloc_pages = (alloc_size / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
alloc_pages = GITS_BASER_PAGES_MAX;
@@ -938,13 +939,16 @@ retry_baser:
* size and retry. If we reach 4K, then
* something is horribly wrong...
*/
+ free_pages((unsigned long)base, order);
+ its->tables[i] = NULL;
+
switch (psz) {
case SZ_16K:
psz = SZ_4K;
- goto retry_baser;
+ goto retry_alloc_baser;
case SZ_64K:
psz = SZ_16K;
- goto retry_baser;
+ goto retry_alloc_baser;
}
}
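When the ITS rejects the programmed page size, the code now frees the previous allocation and jumps back to retry_alloc_baser, so the shrink-and-retry path no longer leaks the earlier pages. A toy user-space model of that loop (calloc/free stand in for __get_free_pages/free_pages, and the acceptance test is invented):

#include <stdio.h>
#include <stdlib.h>

#define SZ_4K	0x1000UL
#define SZ_16K	0x4000UL
#define SZ_64K	0x10000UL

/* Pretend the ITS only accepts 4K device-table pages. */
static int hw_accepts(unsigned long psz)
{
	return psz == SZ_4K;
}

int main(void)
{
	unsigned long alloc_size = SZ_64K;	/* total table size stays fixed */
	unsigned long psz = SZ_64K;
	void *base;

retry_alloc_baser:
	base = calloc(1, alloc_size);
	printf("trying psz=0x%lx (%lu pages)\n", psz, alloc_size / psz);
	if (!hw_accepts(psz)) {
		/* Free the previous attempt before retrying with a smaller
		 * page size; before the patch this allocation was leaked. */
		free(base);
		switch (psz) {
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		}
		return 1;	/* even 4K pages were rejected */
	}
	printf("programmed table with psz=0x%lx\n", psz);
	free(base);
	return 0;
}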
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index c22e2d40cb30..efe50845939d 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
writel(0, icoll_priv.intr + i);
icoll_add_domain(np, ASM9260_NUM_IRQS);
+ set_handle_irq(icoll_handle_irq);
return 0;
}
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index c71914e8f596..5dc5a760c723 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -605,7 +605,7 @@ err:
return ERR_PTR(ret);
}
-static struct s3c_irq_data init_eint[32] = {
+static struct s3c_irq_data __maybe_unused init_eint[32] = {
{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
{ .type = S3C_IRQTYPE_NONE, }, /* reserved */
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 546d05f4358a..b2bbe8659bed 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -81,6 +81,7 @@ config STI_MBOX
config MAILBOX_TEST
tristate "Mailbox Test Client"
depends on OF
+ depends on HAS_IOMEM
help
Test client to help with testing new Controller driver
implementations.
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 45d85aea9955..8f779a1ec99c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -81,16 +81,10 @@ static struct mbox_controller pcc_mbox_ctrl = {};
*/
static struct mbox_chan *get_pcc_channel(int id)
{
- struct mbox_chan *pcc_chan;
-
if (id < 0 || id > pcc_mbox_ctrl.num_chans)
return ERR_PTR(-ENOENT);
- pcc_chan = (struct mbox_chan *)
- (unsigned long) pcc_mbox_channels +
- (id * sizeof(*pcc_chan));
-
- return pcc_chan;
+ return &pcc_mbox_channels[id];
}
/**
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4f22e919787a..d80cce499a56 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,10 +210,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
- int node_offset = 0;
-
- if (mddev_is_clustered(bitmap->mddev))
- node_offset = bitmap->cluster_slot * store->file_pages;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4a8e15058e8b..685aa2d77e25 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void faulty_make_request(struct mddev *mddev, struct bio *bio)
{
struct faulty_conf *conf = mddev->private;
int failit = 0;
@@ -226,7 +226,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
generic_make_request(bio);
}
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void faulty_status(struct seq_file *seq, struct mddev *mddev)
{
struct faulty_conf *conf = mddev->private;
int n;
@@ -259,7 +259,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
}
-static int reshape(struct mddev *mddev)
+static int faulty_reshape(struct mddev *mddev)
{
int mode = mddev->new_layout & ModeMask;
int count = mddev->new_layout >> ModeShift;
@@ -299,7 +299,7 @@ static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disk
return sectors;
}
-static int run(struct mddev *mddev)
+static int faulty_run(struct mddev *mddev)
{
struct md_rdev *rdev;
int i;
@@ -327,7 +327,7 @@ static int run(struct mddev *mddev)
md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
mddev->private = conf;
- reshape(mddev);
+ faulty_reshape(mddev);
return 0;
}
@@ -344,11 +344,11 @@ static struct md_personality faulty_personality =
.name = "faulty",
.level = LEVEL_FAULTY,
.owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
+ .make_request = faulty_make_request,
+ .run = faulty_run,
.free = faulty_free,
- .status = status,
- .check_reshape = reshape,
+ .status = faulty_status,
+ .check_reshape = faulty_reshape,
.size = faulty_size,
};
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0ded8e97751d..dd97d4245822 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -293,6 +293,7 @@ static void recover_bitmaps(struct md_thread *thread)
dlm_unlock:
dlm_unlock_sync(bm_lockres);
clear_bit:
+ lockres_free(bm_lockres);
clear_bit(slot, &cinfo->recovery_map);
}
}
@@ -682,8 +683,10 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres)
return -ENOMEM;
- if (i == (cinfo->slot_number - 1))
+ if (i == (cinfo->slot_number - 1)) {
+ lockres_free(bm_lockres);
continue;
+ }
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
@@ -858,6 +861,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->token_lockres);
lockres_free(cinfo->ack_lockres);
lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
dlm_release_lockspace(cinfo->lockspace, 2);
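The three md-cluster.c hunks share one theme: every lockres_init() now has a matching lockres_free() on the paths that previously leaked it, whether a slot is skipped, the recovery loop finishes, or the node leaves the cluster. A standalone sketch of that acquire/release discipline, using malloc/free as a stand-in for the DLM lock resources:

#include <stdlib.h>
#include <stdio.h>

struct lockres { int id; };

static struct lockres *lockres_init_stub(int id)
{
	struct lockres *res = malloc(sizeof(*res));
	if (res)
		res->id = id;
	return res;
}

static int gather_one(int slot, int my_slot)
{
	struct lockres *res = lockres_init_stub(slot);
	if (!res)
		return -1;

	if (slot == my_slot) {
		free(res);           /* release even on the "skip" path */
		return 0;
	}

	printf("inspecting slot %d\n", slot);
	free(res);                   /* and on the normal path */
	return 0;
}

int main(void)
{
	for (int slot = 0; slot < 4; slot++)
		gather_one(slot, 2);
	return 0;
}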
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c4b913409226..4e3843f7d245 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1044,7 +1044,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_make_request(struct mddev *mddev, struct bio * bio)
{
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
@@ -1422,7 +1422,7 @@ read_again:
wake_up(&conf->wait_barrier);
}
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
struct r1conf *conf = mddev->private;
int i;
@@ -1439,7 +1439,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "]");
}
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
@@ -2472,7 +2472,8 @@ static int init_resync(struct r1conf *conf)
* that can be installed to exclude normal IO requests.
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
@@ -2890,7 +2891,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
}
static void raid1_free(struct mddev *mddev, void *priv);
-static int run(struct mddev *mddev)
+static int raid1_run(struct mddev *mddev)
{
struct r1conf *conf;
int i;
@@ -3170,15 +3171,15 @@ static struct md_personality raid1_personality =
.name = "raid1",
.level = 1,
.owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
+ .make_request = raid1_make_request,
+ .run = raid1_run,
.free = raid1_free,
- .status = status,
- .error_handler = error,
+ .status = raid1_status,
+ .error_handler = raid1_error,
.hot_add_disk = raid1_add_disk,
.hot_remove_disk= raid1_remove_disk,
.spare_active = raid1_spare_active,
- .sync_request = sync_request,
+ .sync_request = raid1_sync_request,
.resize = raid1_resize,
.size = raid1_size,
.check_reshape = raid1_reshape,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ce959b4ae4df..1c1447dd3417 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1442,7 +1442,7 @@ retry_write:
one_write_done(r10_bio);
}
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1484,7 +1484,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
wake_up(&conf->wait_barrier);
}
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid10_status(struct seq_file *seq, struct mddev *mddev)
{
struct r10conf *conf = mddev->private;
int i;
@@ -1562,7 +1562,7 @@ static int enough(struct r10conf *conf, int ignore)
_enough(conf, 1, ignore);
}
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r10conf *conf = mddev->private;
@@ -2802,7 +2802,7 @@ static int init_resync(struct r10conf *conf)
*
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int *skipped)
{
struct r10conf *conf = mddev->private;
@@ -3523,7 +3523,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
-static int run(struct mddev *mddev)
+static int raid10_run(struct mddev *mddev)
{
struct r10conf *conf;
int i, disk_idx, chunk_size;
@@ -4617,15 +4617,15 @@ static struct md_personality raid10_personality =
.name = "raid10",
.level = 10,
.owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
+ .make_request = raid10_make_request,
+ .run = raid10_run,
.free = raid10_free,
- .status = status,
- .error_handler = error,
+ .status = raid10_status,
+ .error_handler = raid10_error,
.hot_add_disk = raid10_add_disk,
.hot_remove_disk= raid10_remove_disk,
.spare_active = raid10_spare_active,
- .sync_request = sync_request,
+ .sync_request = raid10_sync_request,
.quiesce = raid10_quiesce,
.size = raid10_size,
.resize = raid10_resize,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a086014dcd49..b4f02c9959f2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2496,7 +2496,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
dev->sector = raid5_compute_blocknr(sh, i, previous);
}
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r5conf *conf = mddev->private;
@@ -2958,7 +2958,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
* If several bio share a stripe. The bio bi_phys_segments acts as a
* reference count to avoid race. The reference count should already be
* increased before this function is called (for example, in
- * make_request()), so other bio sharing this stripe will not free the
+ * raid5_make_request()), so other bio sharing this stripe will not free the
* stripe. If a stripe is owned by one stripe, the stripe lock will
* protect it.
*/
@@ -5135,7 +5135,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
}
-static void make_request(struct mddev *mddev, struct bio * bi)
+static void raid5_make_request(struct mddev *mddev, struct bio * bi)
{
struct r5conf *conf = mddev->private;
int dd_idx;
@@ -5225,7 +5225,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
new_sector = raid5_compute_sector(conf, logical_sector,
previous,
&dd_idx, NULL);
- pr_debug("raid456: make_request, sector %llu logical %llu\n",
+ pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
@@ -5575,7 +5575,8 @@ ret:
return retn;
}
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
+ int *skipped)
{
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
@@ -6674,7 +6675,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
-static int run(struct mddev *mddev)
+static int raid5_run(struct mddev *mddev)
{
struct r5conf *conf;
int working_disks = 0;
@@ -7048,7 +7049,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
mddev->to_remove = &raid5_attrs_group;
}
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid5_status(struct seq_file *seq, struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
int i;
@@ -7864,15 +7865,15 @@ static struct md_personality raid6_personality =
.name = "raid6",
.level = 6,
.owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
+ .make_request = raid5_make_request,
+ .run = raid5_run,
.free = raid5_free,
- .status = status,
- .error_handler = error,
+ .status = raid5_status,
+ .error_handler = raid5_error,
.hot_add_disk = raid5_add_disk,
.hot_remove_disk= raid5_remove_disk,
.spare_active = raid5_spare_active,
- .sync_request = sync_request,
+ .sync_request = raid5_sync_request,
.resize = raid5_resize,
.size = raid5_size,
.check_reshape = raid6_check_reshape,
@@ -7887,15 +7888,15 @@ static struct md_personality raid5_personality =
.name = "raid5",
.level = 5,
.owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
+ .make_request = raid5_make_request,
+ .run = raid5_run,
.free = raid5_free,
- .status = status,
- .error_handler = error,
+ .status = raid5_status,
+ .error_handler = raid5_error,
.hot_add_disk = raid5_add_disk,
.hot_remove_disk= raid5_remove_disk,
.spare_active = raid5_spare_active,
- .sync_request = sync_request,
+ .sync_request = raid5_sync_request,
.resize = raid5_resize,
.size = raid5_size,
.check_reshape = raid5_check_reshape,
@@ -7911,15 +7912,15 @@ static struct md_personality raid4_personality =
.name = "raid4",
.level = 4,
.owner = THIS_MODULE,
- .make_request = make_request,
- .run = run,
+ .make_request = raid5_make_request,
+ .run = raid5_run,
.free = raid5_free,
- .status = status,
- .error_handler = error,
+ .status = raid5_status,
+ .error_handler = raid5_error,
.hot_add_disk = raid5_add_disk,
.hot_remove_disk= raid5_remove_disk,
.spare_active = raid5_spare_active,
- .sync_request = sync_request,
+ .sync_request = raid5_sync_request,
.resize = raid5_resize,
.size = raid5_size,
.check_reshape = raid5_check_reshape,
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 0e209b56c76c..c6abeb4fba9d 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda1004x_state* state = fe->demodulator_priv;
+ int status;
dprintk("%s\n", __func__);
+ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+ if (status == -1)
+ return -EIO;
+
+ /* Only update the properties cache if device is locked */
+ if (!(status & 8))
+ return 0;
+
// inversion status
fe_params->inversion = INVERSION_OFF;
if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
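The tda1004x_get_fe() change reads the demodulator status register first and returns early unless the lock bit (the status & 8 test) is set, so a device that has lost lock can no longer overwrite the frontend property cache with whatever the hardware registers happen to contain. A minimal sketch of that gate, with a stubbed register read standing in for the I2C access:

#include <stdio.h>

#define STATUS_LOCKED 0x08                   /* assumed lock bit, mirroring (status & 8) */

static int read_status_stub(void) { return 0x05; }   /* pretend: not locked */

struct props { int inversion; };

static int get_frontend(struct props *cache)
{
	int status = read_status_stub();

	if (status < 0)
		return -1;                   /* I/O error */
	if (!(status & STATUS_LOCKED))
		return 0;                    /* no lock: leave the cache untouched */

	cache->inversion = 1;                /* only now refresh cached parameters */
	return 0;
}

int main(void)
{
	struct props cache = { .inversion = -1 };
	get_frontend(&cache);
	printf("cached inversion: %d\n", cache.inversion);   /* still -1 */
	return 0;
}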
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 830491960add..bf82726fd3f4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -478,7 +478,6 @@ static const struct i2c_device_id ir_kbd_id[] = {
{ "ir_rx_z8f0811_hdpvr", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
static struct i2c_driver ir_kbd_driver = {
.driver = {
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index b9e43ffa5085..cbe4711e9b31 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -144,8 +144,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
if (mf) {
mutex_lock(&sensor->lock);
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- *mf = fmt->format;
+ *mf = fmt->format;
mutex_unlock(&sensor->lock);
}
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 1d2c310ce838..94f816244407 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1211,6 +1211,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
static int alsa_device_exit(struct saa7134_dev *dev)
{
+ if (!snd_saa7134_cards[dev->nr])
+ return 1;
snd_card_free(snd_saa7134_cards[dev->nr]);
snd_saa7134_cards[dev->nr] = NULL;
@@ -1260,7 +1262,8 @@ static void saa7134_alsa_exit(void)
int idx;
for (idx = 0; idx < SNDRV_CARDS; idx++) {
- snd_card_free(snd_saa7134_cards[idx]);
+ if (snd_saa7134_cards[idx])
+ snd_card_free(snd_saa7134_cards[idx]);
}
saa7134_dmasound_init = NULL;
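Both saa7134-alsa hunks defend the teardown paths against cards that were never created: alsa_device_exit() bails out when the per-device slot is empty, and the module-exit loop frees only populated slots. The general pattern is to treat the pointer table as the single source of truth and clear an entry as soon as it is released, as in this small sketch:

#include <stdlib.h>
#include <stdio.h>

#define NCARDS 4

static int *cards[NCARDS];

static void card_exit(int nr)
{
	if (!cards[nr])
		return;                  /* nothing was ever allocated for this slot */
	free(cards[nr]);
	cards[nr] = NULL;                /* make repeated teardown harmless */
}

int main(void)
{
	cards[1] = malloc(sizeof(int));  /* only slot 1 is populated */

	for (int i = 0; i < NCARDS; i++)
		card_exit(i);            /* safe for empty and full slots alike */

	puts("teardown done");
	return 0;
}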
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 526359447ff9..8b89ebe16d94 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -215,6 +215,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_STI_BDISP
tristate "STMicroelectronics BDISP 2D blitter driver"
depends on VIDEO_DEV && VIDEO_V4L2
+ depends on HAS_DMA
depends on ARCH_STI || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 40423c6c5324..57d42c6172c5 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_SAMSUNG_EXYNOS4_IS
- bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+ tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on OF && COMMON_CLK
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 49658ca39e51..979c388ebf60 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -631,6 +631,12 @@ static int fimc_is_hw_open_sensor(struct fimc_is *is,
fimc_is_mem_barrier();
+ /*
+ * Some user space use cases hang up here without this
+ * empirically chosen delay.
+ */
+ udelay(100);
+
mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index bf9261eb57a1..c0816728cbfe 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -218,8 +218,8 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
ivb->dma_addr[i];
isp_dbg(2, &video->ve.vdev,
- "dma_buf %pad (%d/%d/%d) addr: %pad\n",
- &buf_index, ivb->index, i, vb->index,
+ "dma_buf %d (%d/%d/%d) addr: %pad\n",
+ buf_index, ivb->index, i, vb->index,
&ivb->dma_addr[i]);
}
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index f3b2dd30ec77..e79ddbb1e14f 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -186,6 +186,37 @@ error:
}
/**
+ * __fimc_pipeline_enable - enable power of all pipeline subdevs
+ * and the sensor clock
+ * @ep: video pipeline structure
+ * @fmd: fimc media device
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
+ struct fimc_md *fmd)
+{
+ struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ int ret;
+
+ /* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
+ ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = fimc_pipeline_s_power(p, 1);
+ if (!ret)
+ return 0;
+
+ if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+ clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+ return ret;
+}
+
+/**
* __fimc_pipeline_open - update the pipeline information, enable power
* of all pipeline subdevs and the sensor clock
* @me: media entity to start graph walk with
@@ -199,7 +230,6 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
struct fimc_md *fmd = entity_to_fimc_mdev(me);
struct fimc_pipeline *p = to_fimc_pipeline(ep);
struct v4l2_subdev *sd;
- int ret;
if (WARN_ON(p == NULL || me == NULL))
return -EINVAL;
@@ -208,24 +238,16 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
fimc_pipeline_prepare(p, me);
sd = p->subdevs[IDX_SENSOR];
- if (sd == NULL)
- return -EINVAL;
-
- /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
- if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
- ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
- if (ret < 0)
- return ret;
- }
-
- ret = fimc_pipeline_s_power(p, 1);
- if (!ret)
+ if (sd == NULL) {
+ pr_warn("%s(): No sensor subdev\n", __func__);
+ /*
+ * Pipeline open cannot fail so as to make it possible
+ * for the user space to configure the pipeline.
+ */
return 0;
+ }
- if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
- clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
-
- return ret;
+ return __fimc_pipeline_enable(ep, fmd);
}
/**
@@ -269,10 +291,43 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
{ IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
};
struct fimc_pipeline *p = to_fimc_pipeline(ep);
+ struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
+ enum fimc_subdev_index sd_id;
int i, ret = 0;
- if (p->subdevs[IDX_SENSOR] == NULL)
- return -ENODEV;
+ if (p->subdevs[IDX_SENSOR] == NULL) {
+ if (!fmd->user_subdev_api) {
+ /*
+ * Sensor must be already discovered if we
+ * aren't in the user_subdev_api mode
+ */
+ return -ENODEV;
+ }
+
+ /* Get pipeline sink entity */
+ if (p->subdevs[IDX_FIMC])
+ sd_id = IDX_FIMC;
+ else if (p->subdevs[IDX_IS_ISP])
+ sd_id = IDX_IS_ISP;
+ else if (p->subdevs[IDX_FLITE])
+ sd_id = IDX_FLITE;
+ else
+ return -ENODEV;
+
+ /*
+ * Sensor could have been linked between open and STREAMON -
+ * check if this is the case.
+ */
+ fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
+ return -ENODEV;
+
+ ret = __fimc_pipeline_enable(ep, fmd);
+ if (ret < 0)
+ return ret;
+
+ }
for (i = 0; i < IDX_MAX; i++) {
unsigned int idx = seq[on][i];
@@ -282,8 +337,10 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
goto error;
}
+
return 0;
error:
+ fimc_pipeline_s_power(p, !on);
for (; i >= 0; i--) {
unsigned int idx = seq[on][i];
v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
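The media-dev.c changes above pull the power and clock bring-up into __fimc_pipeline_enable() so that both __fimc_pipeline_open() and the late sensor discovery in __fimc_pipeline_s_stream() share one enable-and-roll-back sequence: prepare the clock, power the subdevs, and undo the clock if powering fails. A compact sketch of that idiom, with stand-in enable and disable functions (the failure is simulated):

#include <stdio.h>

static int clk_on(void)   { puts("clk on");   return 0;  }
static void clk_off(void) { puts("clk off");             }
static int power_on(void) { puts("power on"); return -1; } /* simulate failure */

static int pipeline_enable(void)
{
	int ret = clk_on();
	if (ret)
		return ret;

	ret = power_on();
	if (!ret)
		return 0;

	clk_off();                       /* roll back the first step on failure */
	return ret;
}

int main(void)
{
	printf("pipeline_enable() = %d\n", pipeline_enable());
	return 0;
}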
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index c398b285180c..1af779ee3c74 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -795,7 +795,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
xlate->host_fmt = &isi_camera_formats[i];
xlate->code = code.code;
dev_dbg(icd->parent, "Providing format %s using code %d\n",
- isi_camera_formats[0].name, code.code);
+ xlate->host_fmt->name, xlate->code);
}
break;
default:
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index cc84c6d6a701..46c7186f7867 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -1493,6 +1493,8 @@ static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier,
struct soc_camera_async_client, notifier);
struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
+ icd->control = NULL;
+
if (icd->clk) {
v4l2_clk_unregister(icd->clk);
icd->clk = NULL;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 42dff9d020af..533bc796391e 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -256,7 +256,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
/* Create links. */
list_for_each_entry(entity, &vsp1->entities, list_dev) {
- if (entity->type == VSP1_ENTITY_LIF) {
+ if (entity->type == VSP1_ENTITY_WPF) {
ret = vsp1_wpf_create_links(vsp1, entity);
if (ret < 0)
goto done;
@@ -264,7 +264,10 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
ret = vsp1_rpf_create_links(vsp1, entity);
if (ret < 0)
goto done;
- } else {
+ }
+
+ if (entity->type != VSP1_ENTITY_LIF &&
+ entity->type != VSP1_ENTITY_RPF) {
ret = vsp1_create_links(vsp1, entity);
if (ret < 0)
goto done;
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 637d0d6f79fb..b4dca57d1ae3 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -515,7 +515,7 @@ static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
bool stopped;
spin_lock_irqsave(&pipe->irqlock, flags);
- stopped = pipe->state == VSP1_PIPELINE_STOPPED,
+ stopped = pipe->state == VSP1_PIPELINE_STOPPED;
spin_unlock_irqrestore(&pipe->irqlock, flags);
return stopped;
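The one-character vsp1_video.c fix replaces a comma with a semicolon. Since the comma operator still evaluated the assignment and then the unlock call in order, the function appears to have behaved the same before the fix, but the stray comma silently fused two statements into a single expression and hid the real statement boundary from readers and static checkers. A tiny illustration of how the comma operator can genuinely change meaning when the fused expression sits under a conditional:

#include <stdio.h>

int main(void)
{
	int locked = 0;
	int stopped = -1;

	if (locked)
		stopped = 1,                         /* comma: the next line is   */
		puts("prints only if locked");       /* still part of the if body */

	if (locked)
		stopped = 1;                         /* semicolon: only this is guarded */
	puts("always prints");

	printf("stopped=%d\n", stopped);             /* still -1 */
	return 0;
}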
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index c5d49d7a0d76..ff8953ae52d1 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1063,8 +1063,11 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
*/
static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
{
- int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, vb->planes);
+ int ret = 0;
+
+ if (pb)
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, pb, vb->planes);
return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}
@@ -1077,14 +1080,16 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
unsigned int plane;
- int ret;
+ int ret = 0;
enum dma_data_direction dma_dir =
q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+ if (pb)
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, pb, planes);
if (ret)
return ret;
@@ -1192,14 +1197,16 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
unsigned int plane;
- int ret;
+ int ret = 0;
enum dma_data_direction dma_dir =
q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+ if (pb)
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, pb, planes);
if (ret)
return ret;
@@ -1520,7 +1527,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
q->waiting_for_buffers = false;
vb->state = VB2_BUF_STATE_QUEUED;
- call_void_bufop(q, copy_timestamp, vb, pb);
+ if (pb)
+ call_void_bufop(q, copy_timestamp, vb, pb);
trace_vb2_qbuf(q, vb);
@@ -1532,7 +1540,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
__enqueue_in_driver(vb);
/* Fill buffer information for the userspace */
- call_void_bufop(q, fill_user_buffer, vb, pb);
+ if (pb)
+ call_void_bufop(q, fill_user_buffer, vb, pb);
/*
* If streamon has been called, and we haven't yet called
@@ -1731,7 +1740,8 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
* The return values from this function are intended to be directly returned
* from vidioc_dqbuf handler in driver.
*/
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+ bool nonblocking)
{
struct vb2_buffer *vb = NULL;
int ret;
@@ -1754,8 +1764,12 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
call_void_vb_qop(vb, buf_finish, vb);
+ if (pindex)
+ *pindex = vb->index;
+
/* Fill buffer information for the userspace */
- call_void_bufop(q, fill_user_buffer, vb, pb);
+ if (pb)
+ call_void_bufop(q, fill_user_buffer, vb, pb);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
@@ -1828,7 +1842,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* that's done in dqbuf, but that's not going to happen when we
* cancel the whole queue. Note: this code belongs here, not in
* __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
- * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+ * call to __fill_user_buffer() after buf_finish(). That order can't
* be changed, so we can't move the buf_finish() to __vb2_dqbuf().
*/
for (i = 0; i < q->num_buffers; ++i) {
@@ -2357,7 +2371,6 @@ struct vb2_fileio_data {
unsigned int count;
unsigned int type;
unsigned int memory;
- struct vb2_buffer *b;
struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
unsigned int cur_index;
unsigned int initial_index;
@@ -2410,12 +2423,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
if (fileio == NULL)
return -ENOMEM;
- fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL);
- if (fileio->b == NULL) {
- kfree(fileio);
- return -ENOMEM;
- }
-
fileio->read_once = q->fileio_read_once;
fileio->write_immediately = q->fileio_write_immediately;
@@ -2460,13 +2467,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* Queue all buffers.
*/
for (i = 0; i < q->num_buffers; i++) {
- struct vb2_buffer *b = fileio->b;
-
- memset(b, 0, q->buf_struct_size);
- b->type = q->type;
- b->memory = q->memory;
- b->index = i;
- ret = vb2_core_qbuf(q, i, b);
+ ret = vb2_core_qbuf(q, i, NULL);
if (ret)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
@@ -2511,7 +2512,6 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
q->fileio = NULL;
fileio->count = 0;
vb2_core_reqbufs(q, fileio->memory, &fileio->count);
- kfree(fileio->b);
kfree(fileio);
dprintk(3, "file io emulator closed\n");
}
@@ -2539,7 +2539,8 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
* else is able to provide this information with the write() operation.
*/
bool copy_timestamp = !read && q->copy_timestamp;
- int ret, index;
+ unsigned index;
+ int ret;
dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
read ? "read" : "write", (long)*ppos, count,
@@ -2564,22 +2565,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
*/
index = fileio->cur_index;
if (index >= q->num_buffers) {
- struct vb2_buffer *b = fileio->b;
+ struct vb2_buffer *b;
/*
* Call vb2_dqbuf to get buffer back.
*/
- memset(b, 0, q->buf_struct_size);
- b->type = q->type;
- b->memory = q->memory;
- ret = vb2_core_dqbuf(q, b, nonblock);
+ ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
dprintk(5, "vb2_dqbuf result: %d\n", ret);
if (ret)
return ret;
fileio->dq_count += 1;
- fileio->cur_index = index = b->index;
+ fileio->cur_index = index;
buf = &fileio->bufs[index];
+ b = q->bufs[index];
/*
* Get number of bytes filled by the driver
@@ -2630,7 +2629,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
* Queue next buffer if required.
*/
if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
- struct vb2_buffer *b = fileio->b;
+ struct vb2_buffer *b = q->bufs[index];
/*
* Check if this is the last buffer to read.
@@ -2643,15 +2642,11 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
/*
* Call vb2_qbuf and give buffer to the driver.
*/
- memset(b, 0, q->buf_struct_size);
- b->type = q->type;
- b->memory = q->memory;
- b->index = index;
b->planes[0].bytesused = buf->pos;
if (copy_timestamp)
b->timestamp = ktime_get_ns();
- ret = vb2_core_qbuf(q, index, b);
+ ret = vb2_core_qbuf(q, index, NULL);
dprintk(5, "vb2_dbuf result: %d\n", ret);
if (ret)
return ret;
@@ -2713,10 +2708,9 @@ static int vb2_thread(void *data)
{
struct vb2_queue *q = data;
struct vb2_threadio_data *threadio = q->threadio;
- struct vb2_fileio_data *fileio = q->fileio;
bool copy_timestamp = false;
- int prequeue = 0;
- int index = 0;
+ unsigned prequeue = 0;
+ unsigned index = 0;
int ret = 0;
if (q->is_output) {
@@ -2728,37 +2722,34 @@ static int vb2_thread(void *data)
for (;;) {
struct vb2_buffer *vb;
- struct vb2_buffer *b = fileio->b;
/*
* Call vb2_dqbuf to get buffer back.
*/
- memset(b, 0, q->buf_struct_size);
- b->type = q->type;
- b->memory = q->memory;
if (prequeue) {
- b->index = index++;
+ vb = q->bufs[index++];
prequeue--;
} else {
call_void_qop(q, wait_finish, q);
if (!threadio->stop)
- ret = vb2_core_dqbuf(q, b, 0);
+ ret = vb2_core_dqbuf(q, &index, NULL, 0);
call_void_qop(q, wait_prepare, q);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+ if (!ret)
+ vb = q->bufs[index];
}
if (ret || threadio->stop)
break;
try_to_freeze();
- vb = q->bufs[b->index];
- if (b->state == VB2_BUF_STATE_DONE)
+ if (vb->state != VB2_BUF_STATE_ERROR)
if (threadio->fnc(vb, threadio->priv))
break;
call_void_qop(q, wait_finish, q);
if (copy_timestamp)
- b->timestamp = ktime_get_ns();;
+ vb->timestamp = ktime_get_ns();;
if (!threadio->stop)
- ret = vb2_core_qbuf(q, b->index, b);
+ ret = vb2_core_qbuf(q, vb->index, NULL);
call_void_qop(q, wait_prepare, q);
if (ret || threadio->stop)
break;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index c9a28605511a..91f552124050 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -625,7 +625,7 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
return -EINVAL;
}
- ret = vb2_core_dqbuf(q, b, nonblocking);
+ ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
return ret;
}
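The videobuf2 rework above makes the userspace-facing buffer argument optional: vb2_core_dqbuf() gains a separate pindex out-parameter, and both the qbuf and dqbuf paths skip the fill/copy callbacks when pb is NULL, which lets the read/write emulation and the vb2 thread drive the queue without allocating a scratch vb2_buffer. The underlying idiom is simply a pair of NULL-guarded out-parameters, as in this sketch:

#include <stdio.h>
#include <stddef.h>

struct user_buf { unsigned int index; size_t bytes; };

/* dequeue one item; both out-parameters are optional */
static int core_dqbuf(unsigned int *pindex, struct user_buf *pb)
{
	unsigned int picked = 7;         /* pretend this came off the done list */

	if (pindex)
		*pindex = picked;
	if (pb) {                        /* only fill userspace info when asked */
		pb->index = picked;
		pb->bytes = 4096;
	}
	return 0;
}

int main(void)
{
	unsigned int index;
	struct user_buf b;

	core_dqbuf(&index, NULL);        /* in-kernel caller: just wants the index */
	core_dqbuf(NULL, &b);            /* ioctl path: wants the full description */
	printf("index=%u b.index=%u b.bytes=%zu\n", index, b.index, b.bytes);
	return 0;
}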
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 9fe33fc3c2b9..cf34681af4f6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
/* no PVID with ranges, otherwise it's a bug */
if (pvid)
- err = _mv88e6xxx_port_pvid_set(ds, port, vid);
+ err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
unlock:
mutex_unlock(&ps->smi_mutex);
@@ -2163,7 +2163,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* database, and allow every port to egress frames on all other ports.
*/
reg = BIT(ps->num_ports) - 1; /* all ports */
- ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
+ reg &= ~BIT(port); /* except itself */
+ ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
if (ret)
goto abort;
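The second mv88e6xxx hunk fixes a subtle masking bug: the old reg & ~port cleared bits according to the binary value of the port number rather than clearing the port's own bit, so for example port 2 removed bit 1 from its egress map instead of bit 2. Building the map as "all ports" and then clearing BIT(port) makes the intent explicit. A quick arithmetic sketch:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int num_ports = 7, port = 2;
	unsigned int all = BIT(num_ports) - 1;          /* 0x7f: every port */

	unsigned int old_style = all & ~port;           /* clears bit 1, not port 2's bit */
	unsigned int new_style = all & ~BIT(port);      /* clears exactly port 2's bit */

	printf("all=0x%02x old=0x%02x new=0x%02x\n", all, old_style, new_style);
	return 0;
}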
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index a4799c1fc7d4..5eb9b20c0eea 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
int ret;
ring = pdata->rx_ring;
+ irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring);
if (ret)
@@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
if (pdata->cq_cnt) {
ring = pdata->tx_ring->cp_ring;
+ irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring);
if (ret) {
@@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev)
static void xgene_enet_free_irq(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata;
+ struct xgene_enet_desc_ring *ring;
struct device *dev;
pdata = netdev_priv(ndev);
dev = ndev_to_dev(ndev);
- devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+ ring = pdata->rx_ring;
+ irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+ devm_free_irq(dev, ring->irq, ring);
if (pdata->cq_cnt) {
- devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
- pdata->tx_ring->cp_ring);
+ ring = pdata->tx_ring->cp_ring;
+ irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+ devm_free_irq(dev, ring->irq, ring);
}
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 70d5b62c125a..248dfc40a761 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/efi.h>
+#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index ecc4a334c507..f71ab2647a3b 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
nb8800_tx_done(dev);
again:
- while (work < budget) {
+ do {
struct nb8800_rx_buf *rxb;
unsigned int len;
@@ -330,7 +330,7 @@ again:
rxd->report = 0;
last = next;
work++;
- }
+ } while (work < budget);
if (work) {
priv->rx_descs[last].desc.config |= DESC_EOC;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 8550df189ceb..19f7cd02e085 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -151,8 +151,11 @@ config BNX2X_VXLAN
config BGMAC
tristate "BCMA bus GBit core support"
- depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
+ depends on BCMA && BCMA_HOST_SOC
+ depends on HAS_DMA
+ depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
select PHYLIB
+ select FIXED_PHY
---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
They can be found on BCM47xx SoCs and provide gigabit ethernet.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index df835f5e46d8..5dc89e527e7d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1490,10 +1490,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
last = tx_buf->nr_frags;
j += 2;
- for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+ for (k = 0; k < last; k++, j++) {
+ int ring_idx = j & bp->tx_ring_mask;
skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
- tx_buf = &txr->tx_buf_ring[j];
+ tx_buf = &txr->tx_buf_ring[ring_idx];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -3406,7 +3407,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
req.ring_type = ring_type;
req.ring_id = cpu_to_le16(ring->fw_ring_id);
@@ -4819,8 +4820,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
- stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
-
stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 0d775964b060..457c3bc8cfff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* Ethernet MAC ISRs
*/
if (priv->internal_phy)
- priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT;
+ priv->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9293675df7ba..49eea8981332 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12016,7 +12016,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
int ret;
u32 offset, len, b_offset, odd_len;
u8 *buf;
- __be32 start, end;
+ __be32 start = 0, end;
if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 9d9984a87d42..50c94104f19c 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
const struct macb_config *macb_config = NULL;
- struct clk *pclk, *hclk, *tx_clk;
+ struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
bool native_io;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b89504405b72..872765527081 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
unsigned long flags;
struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
@@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
ns += lio->ptp_adjust;
spin_unlock_irqrestore(&lio->ptp_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
index 48ecbc8aaaea..b423ad380b6a 100644
--- a/drivers/net/ethernet/ezchip/Kconfig
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP
config EZCHIP_NPS_MANAGEMENT_ENET
tristate "EZchip NPS management enet support"
depends on OF_IRQ && OF_NET
+ depends on HAS_IOMEM
---help---
Simple LAN device for debug or management purposes.
Device supports interrupts for RX and TX(completion).
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 4097c58d17a7..cbe21dc7e37e 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -4,6 +4,9 @@
obj-$(CONFIG_FEC) += fec.o
fec-objs :=fec_main.o fec_ptp.o
+CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
+CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
+
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 99d33e2d35e6..2106d72c91dc 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -19,8 +19,7 @@
#include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
- defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -190,28 +189,45 @@
/*
* Define the buffer descriptor structure.
+ *
+ * Evidently, ARM SoCs have the FEC block generated in a
+ * little endian mode so adjust endianness accordingly.
*/
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
+#define fec32_to_cpu le32_to_cpu
+#define fec16_to_cpu le16_to_cpu
+#define cpu_to_fec32 cpu_to_le32
+#define cpu_to_fec16 cpu_to_le16
+#define __fec32 __le32
+#define __fec16 __le16
+
struct bufdesc {
- unsigned short cbd_datlen; /* Data length */
- unsigned short cbd_sc; /* Control and status info */
- unsigned long cbd_bufaddr; /* Buffer address */
+ __fec16 cbd_datlen; /* Data length */
+ __fec16 cbd_sc; /* Control and status info */
+ __fec32 cbd_bufaddr; /* Buffer address */
};
#else
+#define fec32_to_cpu be32_to_cpu
+#define fec16_to_cpu be16_to_cpu
+#define cpu_to_fec32 cpu_to_be32
+#define cpu_to_fec16 cpu_to_be16
+#define __fec32 __be32
+#define __fec16 __be16
+
struct bufdesc {
- unsigned short cbd_sc; /* Control and status info */
- unsigned short cbd_datlen; /* Data length */
- unsigned long cbd_bufaddr; /* Buffer address */
+ __fec16 cbd_sc; /* Control and status info */
+ __fec16 cbd_datlen; /* Data length */
+ __fec32 cbd_bufaddr; /* Buffer address */
};
#endif
struct bufdesc_ex {
struct bufdesc desc;
- unsigned long cbd_esc;
- unsigned long cbd_prot;
- unsigned long cbd_bdu;
- unsigned long ts;
- unsigned short res0[4];
+ __fec32 cbd_esc;
+ __fec32 cbd_prot;
+ __fec32 cbd_bdu;
+ __fec32 ts;
+ __fec16 res0[4];
};
/*
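The fec.h hunk introduces __fec16/__fec32 wrapper types and conversion macros so every buffer-descriptor field access goes through an explicit endianness conversion: little-endian on the ARM-instantiated FEC blocks, big-endian on the ColdFire parts, with sparse-checkable __le/__be types behind the macros. A standalone sketch of the same idea, using plain shift-based helpers rather than the kernel's le/be conversion macros:

#include <stdint.h>
#include <stdio.h>

/* store a CPU value into a little-endian descriptor field */
static void desc16_store(uint8_t field[2], uint16_t v)
{
	field[0] = v & 0xff;
	field[1] = v >> 8;
}

/* read a little-endian descriptor field back into CPU order */
static uint16_t desc16_load(const uint8_t field[2])
{
	return (uint16_t)(field[0] | (field[1] << 8));
}

struct bufdesc_sketch {
	uint8_t datlen[2];               /* always little-endian on the DMA side */
	uint8_t sc[2];
};

int main(void)
{
	struct bufdesc_sketch bd;

	desc16_store(bd.datlen, 1514);
	desc16_store(bd.sc, 0x8000);

	printf("datlen=%u sc=0x%04x raw datlen bytes=%02x %02x\n",
	       (unsigned)desc16_load(bd.datlen), (unsigned)desc16_load(bd.sc),
	       bd.datlen[0], bd.datlen[1]);
	return 0;
}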
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 502da6f48f95..41c81f6ec630 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
bdp = txq->tx_bd_base;
do {
- pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+ pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
index,
bdp == txq->cur_tx ? 'S' : ' ',
bdp == txq->dirty_tx ? 'H' : ' ',
- bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+ fec16_to_cpu(bdp->cbd_sc),
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
txq->tx_skbuff[index]);
bdp = fec_enet_get_nextdesc(bdp, fep, 0);
index++;
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
ebdp = (struct bufdesc_ex *)bdp;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
goto dma_mapping_error;
}
- bdp->cbd_bufaddr = addr;
- bdp->cbd_datlen = frag_len;
- bdp->cbd_sc = status;
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
+ bdp->cbd_datlen = cpu_to_fec16(frag_len);
+ bdp->cbd_sc = cpu_to_fec16(status);
}
return bdp;
@@ -445,8 +447,8 @@ dma_mapping_error:
bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
}
return ERR_PTR(-ENOMEM);
}
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
/* Fill in a Tx ring entry */
bdp = txq->cur_tx;
last_bdp = bdp;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
- bdp->cbd_datlen = buflen;
- bdp->cbd_bufaddr = addr;
+ bdp->cbd_datlen = cpu_to_fec16(buflen);
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
unsigned int estatus = 0;
dma_addr_t addr;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- bdp->cbd_datlen = size;
- bdp->cbd_bufaddr = addr;
+ bdp->cbd_datlen = cpu_to_fec16(size);
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
/* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (is_last) {
status |= BD_ENET_TX_INTR;
if (fep->bufdesc_ex)
- ebdp->cbd_esc |= BD_ENET_TX_INT;
+ ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
}
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
return 0;
}
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
unsigned short status;
unsigned int estatus = 0;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
}
}
- bdp->cbd_bufaddr = dmabuf;
- bdp->cbd_datlen = hdr_len;
+ bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+ bdp->cbd_datlen = cpu_to_fec16(hdr_len);
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
return 0;
}
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
- bdp->cbd_sc = 0;
+ bdp->cbd_sc = cpu_to_fec16(0);
bdp = fec_enet_get_nextdesc(bdp, fep, q);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep, q);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
rxq->cur_rx = rxq->rx_bd_base;
}
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
+ bdp->cbd_sc = cpu_to_fec16(0);
if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(txq->tx_skbuff[i]);
txq->tx_skbuff[i] = NULL;
}
- bdp->cbd_bufaddr = 0;
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
bdp = fec_enet_get_nextdesc(bdp, fep, q);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep, q);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
- writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ writel((__force u32)cpu_to_be32(temp_mac[0]),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((__force u32)cpu_to_be32(temp_mac[1]),
+ fep->hwp + FEC_ADDR_HIGH);
}
/* Clear any outstanding interrupt. */
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
while (bdp != READ_ONCE(txq->cur_tx)) {
/* Order the load of cur_tx and cbd_sc */
rmb();
- status = READ_ONCE(bdp->cbd_sc);
+ status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
if (status & BD_ENET_TX_READY)
break;
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
skb = txq->tx_skbuff[index];
txq->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
- bdp->cbd_bufaddr = 0;
+ if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
if (!skb) {
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
struct skb_shared_hwtstamps shhwtstamps;
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
skb_tstamp_tx(skb, &shhwtstamps);
}
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
if (off)
skb_reserve(skb, fep->rx_align + 1 - off);
- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
if (net_ratelimit())
netdev_err(ndev, "Rx DMA memory map failed\n");
return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
if (!new_skb)
return false;
- dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
*/
bdp = rxq->cur_rx;
- while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget)
break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* Process the incoming frame. */
ndev->stats.rx_packets++;
- pkt_len = bdp->cbd_datlen;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
}
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* If this is a VLAN packet remove the VLAN Tag */
vlan_packet_rcvd = false;
if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+ fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
/* Push and remove the vlan tag */
struct vlan_hdr *vlan_header =
(struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* Get receive timestamp from the skb */
if (fep->hwts_rx_en && fep->bufdesc_ex)
- fec_enet_hwtstamp(fep, ebdp->ts,
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
skb_hwtstamps(skb));
if (fep->bufdesc_ex &&
(fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
- if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+ if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
/* don't check it */
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
napi_gro_receive(&fep->napi, skb);
if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+ dma_sync_single_for_device(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
} else {
@@ -1527,12 +1535,12 @@ rx_processing_done:
/* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY;
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0;
}
@@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can be safety be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
- defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
rxq->rx_skbuff[i] = NULL;
if (skb) {
dma_unmap_single(&fep->pdev->dev,
- bdp->cbd_bufaddr,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
@@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
}
rxq->rx_skbuff[i] = skb;
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
}
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep, queue);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0;
err_alloc:
@@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
if (!txq->tx_bounce[i])
goto err_alloc;
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
+ bdp->cbd_sc = cpu_to_fec16(0);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_TX_INT;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
}
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep, queue);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 52e0091b4fb2..1ba359f17ec6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
cbd_t __iomem *prev_bd;
cbd_t __iomem *last_tx_bd;
- last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
+ last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
/* get the current bd held in TBPTR and scan back from this point */
recheck_bd = curr_tbptr = (cbd_t __iomem *)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index b3645297477e..3bfe36f9405b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = {
static int __ae_match(struct device *dev, const void *data)
{
struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
- const char *ae_id = data;
- if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
- return 1;
-
- return 0;
+ return hdev->dev->of_node == data;
}
-static struct hnae_ae_dev *find_ae(const char *ae_id)
+static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
{
struct device *dev;
- WARN_ON(!ae_id);
+ WARN_ON(!ae_node);
- dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+ dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle);
* return handle ptr or ERR_PTR
*/
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const char *ae_id, u32 port_id,
+ const struct device_node *ae_node,
+ u32 port_id,
struct hnae_buf_ops *bops)
{
struct hnae_ae_dev *dev;
@@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
int i, j;
int ret;
- dev = find_ae(ae_id);
+ dev = find_ae(ae_node);
if (!dev)
return ERR_PTR(-ENODEV);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 6ca94dc3dda3..1cbcb9fa3fb5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -524,8 +524,11 @@ struct hnae_handle {
#define ring_to_dev(ring) ((ring)->q->dev->dev)
-struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
- u32 port_id, struct hnae_buf_ops *bops);
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const struct device_node *ae_node,
+ u32 port_id,
+ struct hnae_buf_ops *bops);
+
void hnae_put_handle(struct hnae_handle *handle);
int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
void hnae_ae_unregister(struct hnae_ae_dev *dev);
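The hnae changes switch the ethernet driver from looking up its accelerator by a DT-provided name string to following an "ae-handle" phandle and matching the class device whose of_node is that exact node, so the binding no longer depends on two drivers constructing the same generated name. Reduced to its core, the match callback becomes a pointer-identity test instead of a string compare, as in this sketch with a stand-in device list (names and node label are illustrative):

#include <stdio.h>
#include <string.h>

struct node   { const char *label; };
struct device { const char *name; const struct node *of_node; };

/* old style: match by a name string both sides must construct identically */
static int match_by_name(const struct device *dev, const void *data)
{
	return strcmp(dev->name, data) == 0;
}

/* new style: match by the node pointer taken straight from the phandle */
static int match_by_node(const struct device *dev, const void *data)
{
	return dev->of_node == data;
}

int main(void)
{
	struct node dsaf_node = { "soc/dsaf" };
	struct device devs[] = {
		{ "dsaf0", &dsaf_node },
		{ "dsaf1", NULL },
	};

	for (unsigned i = 0; i < 2; i++) {
		if (match_by_node(&devs[i], &dsaf_node))
			printf("matched %s by of_node\n", devs[i].name);
		if (match_by_name(&devs[i], "dsaf0"))
			printf("matched %s by name\n", devs[i].name);
	}
	return 0;
}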
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 522b264866b4..a0070d0e740d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -847,6 +847,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
{
struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+ static atomic_t id = ATOMIC_INIT(-1);
switch (dsaf_dev->dsaf_ver) {
case AE_VERSION_1:
@@ -858,6 +859,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
default:
break;
}
+
+ snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
+ (int)atomic_inc_return(&id));
ae_dev->ops = &hns_dsaf_ops;
ae_dev->dev = dsaf_dev->dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1c33bd06bd5c..9439f04962e1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
int ret, i;
u32 desc_num;
u32 buf_size;
- const char *name, *mode_str;
+ const char *mode_str;
struct device_node *np = dsaf_dev->dev->of_node;
if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
@@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else
dsaf_dev->dsaf_ver = AE_VERSION_2;
- ret = of_property_read_string(np, "dsa_name", &name);
- if (ret) {
- dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
- return ret;
- }
- strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
- dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
-
ret = of_property_read_string(np, "mode", &mode_str);
if (ret) {
dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 31c312f9826e..40205b910f80 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -18,6 +18,7 @@ struct hns_mac_cb;
#define DSAF_DRV_NAME "hns_dsaf"
#define DSAF_MOD_VERSION "v1.0"
+#define DSAF_DEVICE_NAME "dsaf"
#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 0e30846a24f8..3f77ff77abbc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
int ret;
h = hnae_get_handle(&priv->netdev->dev,
- priv->ae_name, priv->port_id, NULL);
+ priv->ae_node, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
ret = PTR_ERR(h);
dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
else
priv->enet_ver = AE_VERSION_2;
- ret = of_property_read_string(node, "ae-name", &priv->ae_name);
- if (ret)
- goto out_read_string_fail;
+ priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
+ if (IS_ERR_OR_NULL(priv->ae_node)) {
+ ret = PTR_ERR(priv->ae_node);
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
ret = of_property_read_u32(node, "port-id", &priv->port_id);
if (ret)
- goto out_read_string_fail;
+ goto out_read_prop_fail;
hns_init_mac_addr(ndev);
@@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
out_notify_fail:
(void)cancel_work_sync(&priv->service_task);
-out_read_string_fail:
+out_read_prop_fail:
free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 4b75270f014e..c68ab3d34fc2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -51,7 +51,7 @@ struct hns_nic_ops {
};
struct hns_nic_priv {
- const char *ae_name;
+ const struct device_node *ae_node;
u32 enet_ver;
u32 port_id;
int phy_mode;
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 1d5c3e16d8f4..3daf2d4a7ca0 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
};
#endif
-#ifdef CONFIG_EISA
static struct eisa_device_id hp100_eisa_tbl[] = {
{ "HWPF180" }, /* HP J2577 rev A */
{ "HWP1920" }, /* HP 27248B */
@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
{ "" } /* Mandatory final entry ! */
};
MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
-#endif
-#ifdef CONFIG_PCI
static const struct pci_device_id hp100_pci_tbl[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
-#endif
static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d)
free_netdev(d);
}
-#ifdef CONFIG_EISA
static int hp100_eisa_probe(struct device *gendev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
.remove = hp100_eisa_remove,
}
};
-#endif
-#ifdef CONFIG_PCI
static int hp100_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
.probe = hp100_pci_probe,
.remove = hp100_pci_remove,
};
-#endif
/*
* module section
@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
err = hp100_isa_init();
if (err && err != -ENODEV)
goto out;
-#ifdef CONFIG_EISA
err = eisa_driver_register(&hp100_eisa_driver);
if (err && err != -ENODEV)
goto out2;
-#endif
-#ifdef CONFIG_PCI
err = pci_register_driver(&hp100_pci_driver);
if (err && err != -ENODEV)
goto out3;
-#endif
out:
return err;
out3:
-#ifdef CONFIG_EISA
eisa_driver_unregister (&hp100_eisa_driver);
out2:
-#endif
hp100_isa_cleanup();
goto out;
}
@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
static void __exit hp100_module_exit(void)
{
hp100_isa_cleanup();
-#ifdef CONFIG_EISA
eisa_driver_unregister (&hp100_eisa_driver);
-#endif
-#ifdef CONFIG_PCI
pci_unregister_driver (&hp100_pci_driver);
-#endif
}
module_init(hp100_module_init)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bb4612c159fd..8f3b53e0dc46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7117,9 +7117,7 @@ static void i40e_service_task(struct work_struct *work)
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
i40e_sync_filters_subtask(pf);
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
i40e_sync_udp_filters_subtask(pf);
-#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -8515,6 +8513,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
}
#endif
+
+#if IS_ENABLED(CONFIG_VXLAN)
/**
* i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
* @netdev: This physical port's netdev
@@ -8524,7 +8524,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
static void i40e_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
-#if IS_ENABLED(CONFIG_VXLAN)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -8557,7 +8556,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-#endif
}
/**
@@ -8569,7 +8567,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
static void i40e_del_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
-#if IS_ENABLED(CONFIG_VXLAN)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -8592,9 +8589,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
}
-#endif
}
+#endif
+#if IS_ENABLED(CONFIG_GENEVE)
/**
* i40e_add_geneve_port - Get notifications about GENEVE ports that come up
* @netdev: This physical port's netdev
@@ -8604,7 +8602,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
static void i40e_add_geneve_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
-#if IS_ENABLED(CONFIG_GENEVE)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -8639,7 +8636,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
-#endif
}
/**
@@ -8651,7 +8647,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
static void i40e_del_geneve_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
-#if IS_ENABLED(CONFIG_GENEVE)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -8677,8 +8672,8 @@ static void i40e_del_geneve_port(struct net_device *netdev,
netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
ntohs(port));
}
-#endif
}
+#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 720516b0e8ee..47bd8b3145a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
- struct udphdr *oudph;
- struct iphdr *oiph;
+ struct udphdr *oudph = NULL;
+ struct iphdr *oiph = NULL;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a0c03834a2f7..55831188bc32 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
if (length <= 8 && (uintptr_t)data & 0x7) {
/* Copy unaligned small data fragment to TSO header data area */
- memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+ memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
data, length);
desc->buf_ptr = txq->tso_hdrs_dma
- + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ + tx_index * TSO_HEADER_SIZE;
} else {
/* Alignment is okay, map buffer and hand off to hardware */
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fabc8df40392..662c2ee268c7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -11,28 +11,28 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
-#include <linux/mbus.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
#include <linux/io.h>
-#include <net/tso.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_address.h>
#include <linux/phy.h>
-#include <linux/clk.h>
-#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tso.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -373,6 +373,8 @@ struct mvneta_port {
/* Core clock */
struct clk *clk;
+ /* AXI clock */
+ struct clk *clk_bus;
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
@@ -3242,26 +3244,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
u32 high, low, val;
+ u64 val64;
int i;
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
- val = 0;
-
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
+ pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
- val = (u64)high << 32 | low;
+ val64 = (u64)high << 32 | low;
+ pp->ethtool_stats[i] += val64;
break;
}
-
- pp->ethtool_stats[i] += val;
}
}
@@ -3605,7 +3606,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp->indir[0] = rxq_def;
- pp->clk = devm_clk_get(&pdev->dev, NULL);
+ pp->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pp->clk))
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk);
goto err_put_phy_node;
@@ -3613,6 +3616,10 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk);
+ pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pp->base)) {
@@ -3724,6 +3731,7 @@ err_free_stats:
err_free_ports:
free_percpu(pp->ports);
err_clk:
+ clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
err_put_phy_node:
of_node_put(phy_node);
@@ -3741,6 +3749,7 @@ static int mvneta_remove(struct platform_device *pdev)
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
+ clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
free_percpu(pp->ports);
free_percpu(pp->stats);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0c5237264e3e..bb77e2207804 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1044,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
mlxsw_reg_sftr_port_mask_set(payload, port, 1);
}
+/* SFDF - Switch Filtering DB Flush
+ * --------------------------------
+ * The switch filtering DB flush register is used to flush the FDB.
+ * Note that FDB notifications are flushed as well.
+ */
+#define MLXSW_REG_SFDF_ID 0x2013
+#define MLXSW_REG_SFDF_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
+ .id = MLXSW_REG_SFDF_ID,
+ .len = MLXSW_REG_SFDF_LEN,
+};
+
+/* reg_sfdf_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfdf_flush_type {
+ MLXSW_REG_SFDF_FLUSH_PER_SWID,
+ MLXSW_REG_SFDF_FLUSH_PER_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_PORT,
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_LAG,
+ MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+};
+
+/* reg_sfdf_flush_type
+ * Flush type.
+ * 0 - All SWID dynamic entries are flushed.
+ * 1 - All FID dynamic entries are flushed.
+ * 2 - All dynamic entries pointing to port are flushed.
+ * 3 - All FID dynamic entries pointing to port are flushed.
+ * 4 - All dynamic entries pointing to LAG are flushed.
+ * 5 - All FID dynamic entries pointing to LAG are flushed.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
+
+/* reg_sfdf_flush_static
+ * Static.
+ * 0 - Flush only dynamic entries.
+ * 1 - Flush both dynamic and static entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
+
+static inline void mlxsw_reg_sfdf_pack(char *payload,
+ enum mlxsw_reg_sfdf_flush_type type)
+{
+ MLXSW_REG_ZERO(sfdf, payload);
+ mlxsw_reg_sfdf_flush_type_set(payload, type);
+ mlxsw_reg_sfdf_flush_static_set(payload, true);
+}
+
+/* reg_sfdf_fid
+ * FID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
+
+/* reg_sfdf_system_port
+ * Port to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
+
+/* reg_sfdf_port_fid_system_port
+ * Port to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
+
+/* reg_sfdf_lag_id
+ * LAG ID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
+
+/* reg_sfdf_lag_fid_lag_id
+ * LAG ID to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
+
/* SLDR - Switch LAG Descriptor Register
* -----------------------------------------
* The switch LAG descriptor register is populated by LAG descriptors.
@@ -1701,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
* Module number.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
/* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
/* reg_pmlp_rx_lane
* Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
* equal to Tx lane.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
{
@@ -3121,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SFGC";
case MLXSW_REG_SFTR_ID:
return "SFTR";
+ case MLXSW_REG_SFDF_ID:
+ return "SFDF";
case MLXSW_REG_SLDR_ID:
return "SLDR";
case MLXSW_REG_SLCR_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ce6845d534a8..217856bdd400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile,
};
+static int
+mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
+ mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+ mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
+ mlxsw_sp_port->local_port);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
+ mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+ mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, last_err = 0;
+ u16 vid;
+
+ for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
+ if (err)
+ last_err = err;
+ }
+
+ return last_err;
+}
+
+static int
+__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, last_err = 0;
+ u16 vid;
+
+ for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
+ if (err)
+ last_err = err;
+ }
+
+ return last_err;
+}
+
+static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!list_empty(&mlxsw_sp_port->vports_list))
+ if (mlxsw_sp_port->lagged)
+ return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
+ else
+ return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
+ else
+ if (mlxsw_sp_port->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+}
+
+static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
+ u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+
+ if (mlxsw_sp_vport->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
+ fid);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+}
+
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool flush_fdb)
{
struct net_device *dev = mlxsw_sp_port->dev;
+ if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
+ netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
+
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
@@ -2200,10 +2313,15 @@ err_col_port_enable:
return err;
}
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev,
+ bool flush_fdb);
+
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_upper *lag;
u16 lag_id = mlxsw_sp_port->lag_id;
int err;
@@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
+ /* In case we leave a LAG device that has bridges built on top,
+ * then their teardown sequence is never issued and we need to
+ * invoke the necessary cleanup routines ourselves.
+ */
+ list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+ vport.list) {
+ struct net_device *br_dev;
+
+ if (!mlxsw_sp_vport->bridged)
+ continue;
+
+ br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
+ }
+
+ if (mlxsw_sp_port->bridged) {
+ mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
+
+ if (lag->ref_count == 1)
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ }
+
if (lag->ref_count == 1) {
+ if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
+ netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
if (err)
return err;
@@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev);
-
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *vlan_dev)
{
@@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *br_dev;
br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
}
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
}
mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
} else {
- err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ true);
mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
if (err) {
netdev_err(dev, "Failed to leave bridge\n");
@@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ bool flush_fdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
goto err_vport_flood_set;
}
+ if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
+ netdev_err(dev, "Failed to flush FDB\n");
+
/* Switch between the vFIDs and destroy the old one if needed. */
new_vfid->nr_vports++;
mlxsw_sp_vport->vport.vfid = new_vfid;
@@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
if (!mlxsw_sp_vport)
return NOTIFY_DONE;
err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
- upper_dev);
+ upper_dev, true);
if (err) {
netdev_err(dev, "Failed to leave bridge\n");
return NOTIFY_BAD;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a23dc610d259..7f42eb1c320e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -120,7 +120,6 @@ struct mlxsw_sp {
} fdb_notify;
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
- struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
};
@@ -254,5 +253,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
bool set, bool only_uc);
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 45479ef5bcf4..e492ca2cdecd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -45,6 +45,7 @@
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
+#include <linux/rtnetlink.h>
#include <net/switchdev.h>
#include "spectrum.h"
@@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
switch (state) {
- case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_FORWARDING:
spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
break;
- case BR_STATE_LISTENING: /* fall-through */
case BR_STATE_LEARNING:
spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_BLOCKING:
spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
break;
@@ -936,6 +937,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
vlan->vid_begin, vlan->vid_end, false);
}
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+}
+
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb)
@@ -1040,10 +1049,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
+ switchdev_obj_dump_cb_t *cb,
+ struct net_device *orig_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u16 vport_vid = 0, vport_fid = 0;
+ struct mlxsw_sp_port *tmp;
+ u16 vport_fid = 0;
char *sfd_pl;
char mac[ETH_ALEN];
u16 fid;
@@ -1058,13 +1069,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sfd_pl)
return -ENOMEM;
- mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
u16 tmp;
tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
vport_fid = mlxsw_sp_vfid_to_fid(tmp);
- vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
}
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
@@ -1088,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
&local_port);
if (local_port == mlxsw_sp_port->local_port) {
- if (vport_fid && vport_fid != fid)
- continue;
- else if (vport_fid)
- fdb->vid = vport_vid;
- else
+ if (vport_fid && vport_fid == fid)
+ fdb->vid = 0;
+ else if (!vport_fid &&
+ !mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid;
+ else
+ continue;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj);
@@ -1104,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
mac, &fid, &lag_id);
- if (mlxsw_sp_port ==
- mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
- if (vport_fid && vport_fid != fid)
+ tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
+ if (tmp && tmp->local_port ==
+ mlxsw_sp_port->local_port) {
+ /* LAG records can only point to LAG
+ * devices or VLAN devices on top.
+ */
+ if (!netif_is_lag_master(orig_dev) &&
+ !is_vlan_dev(orig_dev))
continue;
- else if (vport_fid)
- fdb->vid = vport_vid;
- else
+ if (vport_fid && vport_fid == fid)
+ fdb->vid = 0;
+ else if (!vport_fid &&
+ !mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid;
+ else
+ continue;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj);
@@ -1124,7 +1142,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
out:
- mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
kfree(sfd_pl);
return stored_err ? stored_err : err;
}
@@ -1176,7 +1193,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb,
+ obj->orig_dev);
break;
default:
err = -EOPNOTSUPP;
@@ -1194,14 +1212,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};
-static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
- bool adding, char *mac, u16 vid,
+static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
+ char *mac, u16 vid,
struct net_device *dev)
{
struct switchdev_notifier_fdb_info info;
unsigned long notifier_type;
- if (learning && learning_sync) {
+ if (learning_sync) {
info.addr = mac;
info.vid = vid;
notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
@@ -1237,7 +1255,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
}
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ vid = 0;
/* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport;
} else {
@@ -1257,8 +1275,7 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
- mlxsw_sp_port->learning_sync,
+ mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
adding, mac, vid, mlxsw_sp_port->dev);
return;
@@ -1273,6 +1290,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bool adding)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *dev;
char mac[ETH_ALEN];
u16 lag_vid = 0;
u16 lag_id;
@@ -1298,11 +1316,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- lag_vid = vid;
+ lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ dev = mlxsw_sp_vport->dev;
+ vid = 0;
/* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport;
} else {
+ dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
vid = fid;
}
@@ -1319,10 +1339,8 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
- mlxsw_sp_port->learning_sync,
- adding, mac, vid,
- mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
+ mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
+ vid, dev);
return;
just_remove:
@@ -1374,7 +1392,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
- mutex_lock(&mlxsw_sp->fdb_lock);
+ rtnl_lock();
do {
mlxsw_reg_sfn_pack(sfn_pl);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -1387,7 +1405,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
} while (num_rec);
- mutex_unlock(&mlxsw_sp->fdb_lock);
+ rtnl_unlock();
kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -1402,7 +1420,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
return err;
}
- mutex_init(&mlxsw_sp->fdb_lock);
INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a10c928bbd6b..00cfd95ca59d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -28,6 +28,16 @@
#include "moxart_ether.h"
+static inline void moxart_desc_write(u32 data, u32 *desc)
+{
+ *desc = cpu_to_le32(data);
+}
+
+static inline u32 moxart_desc_read(u32 *desc)
+{
+ return le32_to_cpu(*desc);
+}
+
static inline void moxart_emac_write(struct net_device *ndev,
unsigned int reg, unsigned long value)
{
@@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
static void moxart_mac_setup_desc_ring(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- void __iomem *desc;
+ void *desc;
int i;
for (i = 0; i < TX_DESC_NUM; i++) {
@@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
}
- writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+ moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
priv->tx_head = 0;
priv->tx_tail = 0;
@@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
memset(desc, 0, RX_REG_DESC_SIZE);
- writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
- writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+ moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+ moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
@@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
- writel(priv->rx_mapping[i],
+ moxart_desc_write(priv->rx_mapping[i],
desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
- writel(priv->rx_buf[i],
+ moxart_desc_write((uintptr_t)priv->rx_buf[i],
desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
}
- writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+ moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
priv->rx_head = 0;
@@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi);
struct net_device *ndev = priv->ndev;
struct sk_buff *skb;
- void __iomem *desc;
+ void *desc;
unsigned int desc0, len;
int rx_head = priv->rx_head;
int rx = 0;
while (rx < budget) {
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
- desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+ desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
+ rmb(); /* ensure desc0 is up to date */
if (desc0 & RX_DESC0_DMA_OWN)
break;
@@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
priv->stats.multicast++;
rx_next:
- writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+ wmb(); /* prevent setting ownership back too early */
+ moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
rx_head = RX_NEXT(rx_head);
priv->rx_head = rx_head;
@@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- void __iomem *desc;
+ void *desc;
unsigned int len;
unsigned int tx_head = priv->tx_head;
u32 txdes1;
@@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
spin_lock_irq(&priv->txlock);
- if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+ if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
priv->stats.tx_dropped++;
goto out_unlock;
}
+ rmb(); /* only read descriptor data once TX_DESC0_DMA_OWN is seen cleared */
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
@@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_len[tx_head] = len;
priv->tx_skb[tx_head] = skb;
- writel(priv->tx_mapping[tx_head],
+ moxart_desc_write(priv->tx_mapping[tx_head],
desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
- writel(skb->data,
+ moxart_desc_write((uintptr_t)skb->data,
desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
if (skb->len < ETH_ZLEN) {
@@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
if (tx_head == TX_DESC_NUM_MASK)
txdes1 |= TX_DESC1_END;
- writel(txdes1, desc + TX_REG_OFFSET_DESC1);
- writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+ moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
+ wmb(); /* flush descriptor before transferring ownership */
+ moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
/* start to send packet */
writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
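The switch from writel()/readl() to moxart_desc_write()/moxart_desc_read() plus explicit wmb()/rmb() enforces the usual ownership-bit protocol for descriptors kept in coherent memory; a generic sketch of that pattern, with illustrative names rather than the driver's:

/* Sketch: publish a TX descriptor to hardware.  All payload fields are
 * written first, then wmb() orders them before the OWN bit so DMA never
 * observes a half-initialised descriptor.
 */
struct ex_desc {
	__le32 ctrl;		/* OWN bit lives here */
	__le32 len;
	__le32 buf_dma;
};

static void ex_publish(struct ex_desc *d, u32 buf_dma, u32 len, u32 own_bit)
{
	d->buf_dma = cpu_to_le32(buf_dma);
	d->len = cpu_to_le32(len);
	wmb();			/* payload before ownership hand-off */
	d->ctrl = cpu_to_le32(own_bit);
}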
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 2be9280d608c..93a9563ac7c6 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -300,7 +300,7 @@ struct moxart_mac_priv_t {
dma_addr_t rx_base;
dma_addr_t rx_mapping[RX_DESC_NUM];
- void __iomem *rx_desc_base;
+ void *rx_desc_base;
unsigned char *rx_buf_base;
unsigned char *rx_buf[RX_DESC_NUM];
unsigned int rx_head;
@@ -308,7 +308,7 @@ struct moxart_mac_priv_t {
dma_addr_t tx_base;
dma_addr_t tx_mapping[TX_DESC_NUM];
- void __iomem *tx_desc_base;
+ void *tx_desc_base;
unsigned char *tx_buf_base;
unsigned char *tx_buf[RX_DESC_NUM];
unsigned int tx_head;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50d5604833ed..e0993eba5df3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
return IRQ_NONE;
}
-#ifdef CONFIG_PCI_MSI
-
static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
@@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
if (vdev->config.intr_type == MSI_X)
pci_disable_msix(vdev->pdev);
}
-#endif
static void vxge_rem_isr(struct vxgedev *vdev)
{
-#ifdef CONFIG_PCI_MSI
- if (vdev->config.intr_type == MSI_X) {
+ if (IS_ENABLED(CONFIG_PCI_MSI) &&
+ vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
- } else
-#endif
- if (vdev->config.intr_type == INTA) {
+ } else if (vdev->config.intr_type == INTA) {
synchronize_irq(vdev->pdev->irq);
free_irq(vdev->pdev->irq, vdev);
}
@@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
static int vxge_add_isr(struct vxgedev *vdev)
{
int ret = 0;
-#ifdef CONFIG_PCI_MSI
int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
int pci_fun = PCI_FUNC(vdev->pdev->devfn);
- if (vdev->config.intr_type == MSI_X)
+ if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
ret = vxge_enable_msix(vdev);
if (ret) {
@@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
vdev->config.intr_type = INTA;
}
- if (vdev->config.intr_type == MSI_X) {
+ if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
for (intr_idx = 0;
intr_idx < (vdev->no_of_vpath *
VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
@@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
vdev->vxge_entries[intr_cnt].in_use = 1;
vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
}
-INTA_MODE:
-#endif
+INTA_MODE:
if (vdev->config.intr_type == INTA) {
snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
"%s:vxge:INTA", vdev->ndev->name);
@@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
-#ifndef CONFIG_PCI_MSI
- vxge_debug_init(VXGE_ERR,
- "%s: This Kernel does not support "
- "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
- *intr_type = INTA;
-#endif
+ if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: This Kernel does not support "
+ "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
+ *intr_type = INTA;
+ }
/* Configure whether MSI-X or IRQL. */
switch (*intr_type) {
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index a4ab71d43e4e..166a7fc87e2f 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
info.addr = lw->addr;
info.vid = lw->vid;
+ rtnl_lock();
if (learned && removing)
call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
lw->rocker_port->dev, &info.info);
else if (learned && !removing)
call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
lw->rocker_port->dev, &info.info);
+ rtnl_unlock();
rocker_port_kfree(lw->trans, work);
}
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index cc106d892e29..23fa29877f5b 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -389,17 +389,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
if (vio_version_after_eq(&port->vio, 1, 8)) {
struct vio_net_dext *dext = vio_net_ext(desc);
+ skb_reset_network_header(skb);
+
if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
if (skb->protocol == ETH_P_IP) {
- struct iphdr *iph = (struct iphdr *)skb->data;
+ struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
ip_send_check(iph);
}
}
if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
- skb->ip_summed == CHECKSUM_NONE)
- vnet_fullcsum(skb);
+ skb->ip_summed == CHECKSUM_NONE) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ int ihl = iph->ihl * 4;
+
+ skb_reset_transport_header(skb);
+ skb_set_transport_header(skb, ihl);
+ vnet_fullcsum(skb);
+ }
+ }
if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_level = 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 657b65bf5cac..18bf3a8fdc50 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -82,7 +82,7 @@ struct cpdma_desc {
struct cpdma_desc_pool {
phys_addr_t phys;
- u32 hw_addr;
+ dma_addr_t hw_addr;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
@@ -152,7 +152,7 @@ struct cpdma_chan {
* abstract out these details
*/
static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
int size, int align)
{
int bitmap_size;
@@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
if (phys) {
pool->phys = phys;
- pool->iomap = ioremap(phys, size);
+ pool->iomap = ioremap(phys, size); /* should be memremap? */
pool->hw_addr = hw_addr;
} else {
- pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+ pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
GFP_KERNEL);
- pool->iomap = pool->cpumap;
- pool->hw_addr = pool->phys;
+ pool->iomap = (void __iomem __force *)pool->cpumap;
+ pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
}
if (pool->iomap)
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 7f975a2c8990..b0de8ecd7fe8 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev)
const char *print_name = dev_name(bdev);
struct net_device *dev;
DFX_board_t *bp; /* board pointer */
- resource_size_t bar_start[3]; /* pointers to ports */
- resource_size_t bar_len[3]; /* resource length */
+ resource_size_t bar_start[3] = {0}; /* pointers to ports */
+ resource_size_t bar_len[3] = {0}; /* resource length */
int alloc_size; /* total buffer size used */
struct resource *region;
int err = 0;
@@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
- resource_size_t bar_start[3]; /* pointers to ports */
- resource_size_t bar_len[3]; /* resource lengths */
+ resource_size_t bar_start[3] = {0}; /* pointers to ports */
+ resource_size_t bar_len[3] = {0}; /* resource lengths */
int alloc_size; /* total buffer size used */
unregister_netdev(dev);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7456569f53c1..0b14ac3b8d11 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -980,9 +980,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
opts = ip_tunnel_info_opts(info);
if (key->tun_flags & TUNNEL_CSUM)
- flags |= GENEVE_F_UDP_CSUM;
+ flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
else
- flags &= ~GENEVE_F_UDP_CSUM;
+ flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
info->options_len, opts,
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index f4130af09244..fcb92c0d0eb9 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -624,6 +624,7 @@ struct nvsp_message {
#define RNDIS_PKT_ALIGN_DEFAULT 8
struct multi_send_data {
+ struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
u32 count; /* counter of batched packets */
};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 059fc5231601..ec313fc08d82 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -841,6 +841,18 @@ static inline int netvsc_send_pkt(
return ret;
}
+/* Move packet out of multi send data (msd), and clear msd */
+static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
+ struct sk_buff **msd_skb,
+ struct multi_send_data *msdp)
+{
+ *msd_skb = msdp->skb;
+ *msd_send = msdp->pkt;
+ msdp->skb = NULL;
+ msdp->pkt = NULL;
+ msdp->count = 0;
+}
+
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
@@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device,
unsigned int section_index = NETVSC_INVALID_INDEX;
struct multi_send_data *msdp;
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+ struct sk_buff *msd_skb = NULL;
bool try_batch;
bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
@@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device,
net_device->send_section_size) {
section_index = netvsc_get_next_send_section(net_device);
if (section_index != NETVSC_INVALID_INDEX) {
- msd_send = msdp->pkt;
- msdp->pkt = NULL;
- msdp->count = 0;
- msd_len = 0;
+ move_pkt_msd(&msd_send, &msd_skb, msdp);
+ msd_len = 0;
}
}
@@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device,
packet->total_data_buflen += msd_len;
}
- if (msdp->pkt)
- dev_kfree_skb_any(skb);
+ if (msdp->skb)
+ dev_kfree_skb_any(msdp->skb);
if (xmit_more && !packet->cp_partial) {
+ msdp->skb = skb;
msdp->pkt = packet;
msdp->count++;
} else {
cur_send = packet;
+ msdp->skb = NULL;
msdp->pkt = NULL;
msdp->count = 0;
}
} else {
- msd_send = msdp->pkt;
- msdp->pkt = NULL;
- msdp->count = 0;
+ move_pkt_msd(&msd_send, &msd_skb, msdp);
cur_send = packet;
}
if (msd_send) {
- m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb);
+ m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
msd_send->send_buf_index);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(msd_skb);
}
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1c8db9afdcda..1d3a66563bac 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -196,65 +196,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
-union sub_key {
- u64 k;
- struct {
- u8 pad[3];
- u8 kb;
- u32 ka;
- };
-};
-
-/* Toeplitz hash function
- * data: network byte order
- * return: host byte order
- */
-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
-{
- union sub_key subk;
- int k_next = 4;
- u8 dt;
- int i, j;
- u32 ret = 0;
-
- subk.k = 0;
- subk.ka = ntohl(*(u32 *)key);
-
- for (i = 0; i < dlen; i++) {
- subk.kb = key[k_next];
- k_next = (k_next + 1) % klen;
- dt = ((u8 *)data)[i];
- for (j = 0; j < 8; j++) {
- if (dt & 0x80)
- ret ^= subk.ka;
- dt <<= 1;
- subk.k <<= 1;
- }
- }
-
- return ret;
-}
-
-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
-{
- struct flow_keys flow;
- int data_len;
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
- !(flow.basic.n_proto == htons(ETH_P_IP) ||
- flow.basic.n_proto == htons(ETH_P_IPV6)))
- return false;
-
- if (flow.basic.ip_proto == IPPROTO_TCP)
- data_len = 12;
- else
- data_len = 8;
-
- *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
-
- return true;
-}
-
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -267,11 +208,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
return 0;
- if (netvsc_set_hash(&hash, skb)) {
- q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
- ndev->real_num_tx_queues;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
- }
+ hash = skb_get_hash(skb);
+ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+ ndev->real_num_tx_queues;
if (!nvsc_dev->chn_table[q_idx])
q_idx = 0;
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index 29cbde8501ed..d47cf14bb4a5 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -82,9 +82,6 @@ struct bfin_sir_self {
#define DRIVER_NAME "bfin_sir"
-#define port_membase(port) (((struct bfin_sir_port *)(port))->membase)
-#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr)
-#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v))
#include <asm/bfin_serial.h>
static const unsigned short per[][4] = {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6a57a005e0ca..94e688805dd2 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
+ linkwatch_fire_event(dev);
return 0;
@@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused,
port = macvlan_port_get_rtnl(dev);
switch (event) {
+ case NETDEV_UP:
case NETDEV_CHANGE:
list_for_each_entry(vlan, &port->vlans, list)
netif_stacked_transfer_operstate(vlan->lowerdev,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 60994a83a0d6..f0a77020037a 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -186,6 +186,7 @@ config MDIO_GPIO
config MDIO_OCTEON
tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
depends on 64BIT
+ depends on HAS_IOMEM
help
This module provides a driver for the Octeon and ThunderX MDIO
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 180f69952779..7a240fce3a7e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -846,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
struct skb_shared_hwtstamps *shhwtstamps = NULL;
struct sk_buff *skb;
unsigned long flags;
+ u8 overflow;
+
+ overflow = (phy_rxts->ns_hi >> 14) & 0x3;
+ if (overflow)
+ pr_debug("rx timestamp queue overflow, count %d\n", overflow);
spin_lock_irqsave(&dp83640->rx_lock, flags);
@@ -888,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640,
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb;
u64 ns;
+ u8 overflow;
/* We must already have the skb that triggered this. */
@@ -897,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640,
pr_debug("have timestamp but tx_queue empty\n");
return;
}
+
+ overflow = (phy_txts->ns_hi >> 14) & 0x3;
+ if (overflow) {
+ pr_debug("tx timestamp queue overflow, count %d\n", overflow);
+ while (skb) {
+ skb_complete_tx_timestamp(skb, NULL);
+ skb = skb_dequeue(&dp83640->tx_queue);
+ }
+ return;
+ }
+
ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8763bb20988a..5590b9c182c9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -692,25 +692,29 @@ void phy_change(struct work_struct *work)
struct phy_device *phydev =
container_of(work, struct phy_device, phy_queue);
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- goto ignore;
+ if (phy_interrupt_is_valid(phydev)) {
+ if (phydev->drv->did_interrupt &&
+ !phydev->drv->did_interrupt(phydev))
+ goto ignore;
- if (phy_disable_interrupts(phydev))
- goto phy_err;
+ if (phy_disable_interrupts(phydev))
+ goto phy_err;
+ }
mutex_lock(&phydev->lock);
if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
phydev->state = PHY_CHANGELINK;
mutex_unlock(&phydev->lock);
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
+ if (phy_interrupt_is_valid(phydev)) {
+ atomic_dec(&phydev->irq_disable);
+ enable_irq(phydev->irq);
- /* Reenable interrupts */
- if (PHY_HALTED != phydev->state &&
- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
- goto irq_enable_err;
+ /* Reenable interrupts */
+ if (PHY_HALTED != phydev->state &&
+ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
+ goto irq_enable_err;
+ }
/* reschedule state queue work to run as soon as possible */
cancel_delayed_work_sync(&phydev->state_queue);
@@ -905,10 +909,10 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are polling or ignoring
- * interrupts and link changed since latest checking.
+ /* Only register a CHANGE if we are polling and link changed
+ * since latest checking.
*/
- if (!phy_interrupt_is_valid(phydev)) {
+ if (phydev->irq == PHY_POLL) {
old_link = phydev->link;
err = phy_read_status(phydev);
if (err)
@@ -1000,15 +1004,21 @@ void phy_state_machine(struct work_struct *work)
phy_state_to_str(old_state),
phy_state_to_str(phydev->state));
- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- PHY_STATE_TIME * HZ);
+ /* Only re-schedule a PHY state machine change if we are polling the
+ * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
+ * between states from phy_mac_interrupt()
+ */
+ if (phydev->irq == PHY_POLL)
+ queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+ PHY_STATE_TIME * HZ);
}
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
- cancel_work_sync(&phydev->phy_queue);
phydev->link = new_link;
- schedule_work(&phydev->phy_queue);
+
+ /* Trigger a state machine change */
+ queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);
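Since the state machine now reschedules itself only for polled PHYs, a MAC driver using PHY_IGNORE_INTERRUPT is expected to drive it from its own link interrupt via phy_mac_interrupt(); a hedged sketch with hypothetical MAC-side names and register layout:

/* Sketch: forward link events from a (hypothetical) MAC interrupt to
 * phylib for a PHY registered with irq == PHY_IGNORE_INTERRUPT.
 */
struct example_mac_priv {
	struct phy_device *phydev;
	void __iomem *regs;
};

static irqreturn_t example_mac_link_isr(int irq, void *dev_id)
{
	struct example_mac_priv *priv = dev_id;
	/* hypothetical link-status register and bit */
	int link_up = !!(readl(priv->regs + 0x10) & BIT(0));

	if (priv->phydev && priv->phydev->irq == PHY_IGNORE_INTERRUPT)
		phy_mac_interrupt(priv->phydev, link_up);

	return IRQ_HANDLED;
}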
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index e485f2653c82..2e21e9366f76 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -24,6 +24,10 @@
#include <linux/netdevice.h>
#include <linux/smscphy.h>
+struct smsc_phy_priv {
+ bool energy_enable;
+};
+
static int smsc_phy_config_intr(struct phy_device *phydev)
{
int rc = phy_write (phydev, MII_LAN83C185_IM,
@@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
- int __maybe_unused len;
- struct device *dev __maybe_unused = &phydev->mdio.dev;
- struct device_node *of_node __maybe_unused = dev->of_node;
+ struct smsc_phy_priv *priv = phydev->priv;
+
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
- int enable_energy = 1;
if (rc < 0)
return rc;
- if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
- enable_energy = 0;
-
- if (enable_energy) {
+ if (priv->energy_enable) {
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
@@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev)
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
+ struct smsc_phy_priv *priv = phydev->priv;
+
int err = genphy_read_status(phydev);
- int i;
- if (!phydev->link) {
+ if (!phydev->link && priv->energy_enable) {
+ int i;
+
/* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
@@ -149,6 +151,26 @@ static int lan87xx_read_status(struct phy_device *phydev)
return err;
}
+static int smsc_phy_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ struct smsc_phy_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->energy_enable = true;
+
+ if (of_property_read_bool(of_node, "smsc,disable-energy-detect"))
+ priv->energy_enable = false;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver smsc_phy_driver[] = {
{
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .probe = smsc_phy_probe,
+
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -180,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .probe = smsc_phy_probe,
+
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -201,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .probe = smsc_phy_probe,
+
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
@@ -222,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .probe = smsc_phy_probe,
+
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -242,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .probe = smsc_phy_probe,
+
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
@@ -263,6 +295,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .probe = smsc_phy_probe,
+
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 90868ca5e341..ae0905ed4a32 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
return i < MAX_CALLID;
}
-static int add_chan(struct pppox_sock *sock)
+static int add_chan(struct pppox_sock *sock,
+ struct pptp_addr *sa)
{
static int call_id;
spin_lock(&chan_lock);
- if (!sock->proto.pptp.src_addr.call_id) {
+ if (!sa->call_id) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
if (call_id == MAX_CALLID) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
if (call_id == MAX_CALLID)
goto out_err;
}
- sock->proto.pptp.src_addr.call_id = call_id;
- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+ sa->call_id = call_id;
+ } else if (test_bit(sa->call_id, callid_bitmap)) {
goto out_err;
+ }
- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+ sock->proto.pptp.src_addr = *sa;
+ set_bit(sa->call_id, callid_bitmap);
+ rcu_assign_pointer(callid_sock[sa->call_id], sock);
spin_unlock(&chan_lock);
return 0;
@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
- struct pptp_opt *opt = &po->proto.pptp;
int error = 0;
if (sockaddr_len < sizeof(struct sockaddr_pppox))
@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
- opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po))
+ if (sk->sk_state & PPPOX_DEAD) {
+ error = -EALREADY;
+ goto out;
+ }
+
+ if (sk->sk_state & PPPOX_BOUND) {
error = -EBUSY;
+ goto out;
+ }
+
+ if (add_chan(po, &sp->sa_addr.pptp))
+ error = -EBUSY;
+ else
+ sk->sk_state |= PPPOX_BOUND;
+out:
release_sock(sk);
return error;
}
@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
}
opt->dst_addr = sp->sa_addr.pptp;
- sk->sk_state = PPPOX_CONNECTED;
+ sk->sk_state |= PPPOX_CONNECTED;
end:
release_sock(sk);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 2ed53331bfb2..1c299b8a162d 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -36,7 +36,7 @@
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
-#define DRIVER_VERSION "1.0.1"
+#define DRIVER_VERSION "1.0.2"
#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
@@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
u32 val;
+ u32 saved;
int i, ret;
+ int retval;
- ret = lan78xx_eeprom_confirm_not_busy(dev);
- if (ret)
- return ret;
+ /* Depending on the chip, some EEPROM pins are muxed with the LED function.
+ * Disable and restore the LED function to access the EEPROM.
+ */
+ ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ saved = val;
+ if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+ val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
+ ret = lan78xx_write_reg(dev, HW_CFG, val);
+ }
+
+ retval = lan78xx_eeprom_confirm_not_busy(dev);
+ if (retval)
+ return retval;
for (i = 0; i < length; i++) {
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (unlikely(ret < 0)) {
+ retval = -EIO;
+ goto exit;
+ }
- ret = lan78xx_wait_eeprom(dev);
- if (ret < 0)
- return ret;
+ retval = lan78xx_wait_eeprom(dev);
+ if (retval < 0)
+ goto exit;
ret = lan78xx_read_reg(dev, E2P_DATA, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (unlikely(ret < 0)) {
+ retval = -EIO;
+ goto exit;
+ }
data[i] = val & 0xFF;
offset++;
}
- return 0;
+ retval = 0;
+exit:
+ if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+ ret = lan78xx_write_reg(dev, HW_CFG, saved);
+
+ return retval;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
u32 val;
+ u32 saved;
int i, ret;
+ int retval;
- ret = lan78xx_eeprom_confirm_not_busy(dev);
- if (ret)
- return ret;
+ /* Depending on the chip, some EEPROM pins are muxed with the LED function.
+ * Disable and restore the LED function to access the EEPROM.
+ */
+ ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ saved = val;
+ if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+ val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
+ ret = lan78xx_write_reg(dev, HW_CFG, val);
+ }
+
+ retval = lan78xx_eeprom_confirm_not_busy(dev);
+ if (retval)
+ goto exit;
/* Issue write/erase enable command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (unlikely(ret < 0)) {
+ retval = -EIO;
+ goto exit;
+ }
- ret = lan78xx_wait_eeprom(dev);
- if (ret < 0)
- return ret;
+ retval = lan78xx_wait_eeprom(dev);
+ if (retval < 0)
+ goto exit;
for (i = 0; i < length; i++) {
/* Fill data register */
val = data[i];
ret = lan78xx_write_reg(dev, E2P_DATA, val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ retval = -EIO;
+ goto exit;
+ }
/* Send "write" command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ retval = -EIO;
+ goto exit;
+ }
- ret = lan78xx_wait_eeprom(dev);
- if (ret < 0)
- return ret;
+ retval = lan78xx_wait_eeprom(dev);
+ if (retval < 0)
+ goto exit;
offset++;
}
- return 0;
+ retval = 0;
+exit:
+ if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+ ret = lan78xx_write_reg(dev, HW_CFG, saved);
+
+ return retval;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
@@ -904,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (!phydev->link && dev->link_on) {
dev->link_on = false;
- netif_carrier_off(dev->net);
/* reset MAC */
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
@@ -914,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ret = lan78xx_write_reg(dev, MAC_CR, buf);
if (unlikely(ret < 0))
return -EIO;
+
+ phy_mac_interrupt(phydev, 0);
} else if (phydev->link && !dev->link_on) {
dev->link_on = true;
@@ -953,7 +998,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
- netif_carrier_on(dev->net);
+ phy_mac_interrupt(phydev, 1);
}
return ret;
@@ -1495,7 +1540,6 @@ done:
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
int ret;
- int i;
dev->mdiobus = mdiobus_alloc();
if (!dev->mdiobus) {
@@ -1511,10 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
- /* handle our own interrupt */
- for (i = 0; i < PHY_MAX_ADDR; i++)
- dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
-
switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
case 0x78000000:
case 0x78500000:
@@ -1558,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
return -EIO;
}
+ /* Enable PHY interrupts.
+ * We handle our own interrupt
+ */
+ ret = phy_read(phydev, LAN88XX_INT_STS);
+ ret = phy_write(phydev, LAN88XX_INT_MASK,
+ LAN88XX_INT_MASK_MDINTPIN_EN_ |
+ LAN88XX_INT_MASK_LINK_CHANGE_);
+
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+
ret = phy_connect_direct(dev->net, phydev,
lan78xx_link_status_change,
PHY_INTERFACE_MODE_GMII);
@@ -1580,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
SUPPORTED_Pause | SUPPORTED_Asym_Pause);
genphy_config_aneg(phydev);
- /* Workaround to enable PHY interrupt.
- * phy_start_interrupts() is API for requesting and enabling
- * PHY interrupt. However, USB-to-Ethernet device can't use
- * request_irq() called in phy_start_interrupts().
- * Set PHY to PHY_HALTED and call phy_start()
- * to make a call to phy_enable_interrupts()
- */
- phy_stop(phydev);
phy_start(phydev);
netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -2221,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
if (skb2) {
skb_queue_tail(&dev->txq_pend, skb2);
- if (skb_queue_len(&dev->txq_pend) > 10)
+ /* throttle TX path at slower than SUPER SPEED USB */
+ if ((dev->udev->speed < USB_SPEED_SUPER) &&
+ (skb_queue_len(&dev->txq_pend) > 10))
netif_stop_queue(net);
} else {
netif_dbg(dev, tx_err, dev->net,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2d88c799d2ac..65439188c582 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;
-static const u8 all_zeros_mac[ETH_ALEN];
+static const u8 all_zeros_mac[ETH_ALEN + 2];
static int vxlan_sock_add(struct vxlan_dev *vxlan);
@@ -1985,11 +1985,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.port_max, true);
if (info) {
- if (info->key.tun_flags & TUNNEL_CSUM)
- flags |= VXLAN_F_UDP_CSUM;
- else
- flags &= ~VXLAN_F_UDP_CSUM;
-
ttl = info->key.ttl;
tos = info->key.tos;
@@ -2004,8 +1999,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
sk = vxlan->vn4_sock->sock->sk;
- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
- df = htons(IP_DF);
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags |= VXLAN_F_UDP_CSUM;
+ else
+ flags &= ~VXLAN_F_UDP_CSUM;
+ }
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2101,6 +2103,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
+ else
+ flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ }
+
ttl = ttl ? : ip6_dst_hoplimit(ndst);
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
0, ttl, src_port, dst_port, htonl(vni << 8), md,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index a7afdeee698c..73fb4232f9f2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
return -EIO;
}
- if (magic == AR5416_EEPROM_MAGIC) {
- *swap_needed = false;
- } else if (swab16(magic) == AR5416_EEPROM_MAGIC) {
+ *swap_needed = false;
+ if (swab16(magic) == AR5416_EEPROM_MAGIC) {
if (ah->ah_flags & AH_NO_EEP_SWAP) {
ath_info(common,
"Ignoring endianness difference in EEPROM magic bytes.\n");
-
- *swap_needed = false;
} else {
*swap_needed = true;
}
- } else {
+ } else if (magic != AR5416_EEPROM_MAGIC) {
+ if (ath9k_hw_use_flash(ah))
+ return 0;
+
ath_err(common,
"Invalid EEPROM Magic (0x%04x).\n", magic);
return -EINVAL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 53637399bb99..b98db8a0a069 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
return 0;
}
-static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
+ struct sdio_func *func;
+ struct mmc_host *host;
+ uint max_blocks;
uint nents;
int err;
+ func = sdiodev->func[2];
+ host = func->card->host;
+ sdiodev->sg_support = host->max_segs > 1;
+ max_blocks = min_t(uint, host->max_blk_count, 511u);
+ sdiodev->max_request_size = min_t(uint, host->max_req_size,
+ max_blocks * func->cur_blksize);
+ sdiodev->max_segment_count = min_t(uint, host->max_segs,
+ SG_MAX_SINGLE_ALLOC);
+ sdiodev->max_segment_size = host->max_seg_size;
+
if (!sdiodev->sg_support)
return;
@@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
- struct sdio_func *func;
- struct mmc_host *host;
- uint max_blocks;
int ret = 0;
sdiodev->num_funcs = 2;
@@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
goto out;
}
- /*
- * determine host related variables after brcmf_sdiod_probe()
- * as func->cur_blksize is properly set and F2 init has been
- * completed successfully.
- */
- func = sdiodev->func[2];
- host = func->card->host;
- sdiodev->sg_support = host->max_segs > 1;
- max_blocks = min_t(uint, host->max_blk_count, 511u);
- sdiodev->max_request_size = min_t(uint, host->max_req_size,
- max_blocks * func->cur_blksize);
- sdiodev->max_segment_count = min_t(uint, host->max_segs,
- SG_MAX_SINGLE_ALLOC);
- sdiodev->max_segment_size = host->max_seg_size;
-
- /* allocate scatter-gather table. sg support
- * will be disabled upon allocation failure.
- */
- brcmf_sdiod_sgtable_alloc(sdiodev);
-
ret = brcmf_sdiod_freezer_attach(sdiodev);
if (ret)
goto out;
@@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
- brcmf_sdiod_host_fixup(host);
+ brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 4265b50faa98..cfee477a6eb1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
+#include <linux/module.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "core.h"
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index dd6614332836..a14d9d9da094 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4114,6 +4114,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail;
}
+ /* allocate scatter-gather table. sg support
+ * will be disabled upon allocation failure.
+ */
+ brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+
/* Query the F2 block size, set roundup accordingly */
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 5ec7a6d87672..23f223150cef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Issue an abort to the specified function */
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index e60cf141ed79..fa41a5e1c890 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -74,16 +74,19 @@
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
#define IWL7265D_UCODE_API_MAX 20
+#define IWL3168_UCODE_API_MAX 20
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 13
#define IWL7265_UCODE_API_OK 13
#define IWL7265D_UCODE_API_OK 13
+#define IWL3168_UCODE_API_OK 20
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 13
#define IWL7265_UCODE_API_MIN 13
#define IWL7265D_UCODE_API_MIN 13
+#define IWL3168_UCODE_API_MIN 20
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -92,6 +95,8 @@
#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3165_NVM_VERSION 0x709
#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL3168_NVM_VERSION 0xd01
+#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265D_NVM_VERSION 0x0c11
@@ -109,6 +114,9 @@
#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+#define IWL3168_FW_PRE "iwlwifi-3168-"
+#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode"
+
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
@@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.ucode_api_ok = IWL7265_UCODE_API_OK, \
.ucode_api_min = IWL7265_UCODE_API_MIN
+#define IWL_DEVICE_3008 \
+ IWL_DEVICE_7000_COMMON, \
+ .ucode_api_max = IWL3168_UCODE_API_MAX, \
+ .ucode_api_ok = IWL3168_UCODE_API_OK, \
+ .ucode_api_min = IWL3168_UCODE_API_MIN
+
#define IWL_DEVICE_7005D \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265D_UCODE_API_MAX, \
@@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
const struct iwl_cfg iwl3168_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3168",
- .fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7000,
+ .fw_name_pre = IWL3168_FW_PRE,
+ IWL_DEVICE_3008,
.ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL3165_NVM_VERSION,
- .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+ .nvm_ver = IWL3168_NVM_VERSION,
+ .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 0036d18334af..ba3f0bbddde8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp {
* @scd_ssn: the index of the last contiguously sent packet
* @txed: number of Txed frames in this batch
* @txed_2_done: number of Acked frames in this batch
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if a rate other than the first was
+ * used for Tx-ing, the FW will set this value to 0.
*/
struct iwl_mvm_ba_notif {
__le32 sta_addr_lo32;
@@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif {
__le16 scd_ssn;
u8 txed;
u8 txed_2_done;
- __le16 reserved1;
+ u8 reduced_txp;
+ u8 reserved1;
} __packed;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7bb6fd0e4391..94caa88df442 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,6 +2,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm,
return 0;
}
-static int rs_collect_tx_data(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes,
- u8 reduced_txp)
+static int rs_collect_tpc_data(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ u8 reduced_txp)
+{
+ struct iwl_rate_scale_data *window = NULL;
+
+ if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ return -EINVAL;
+
+ window = &tbl->tpc_win[reduced_txp];
+ return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+ window);
+}
+
+static int rs_collect_tlc_data(struct iwl_mvm *mvm,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
{
struct iwl_rate_scale_data *window = NULL;
- int ret;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
@@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm,
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
-
- ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
- window);
- if (ret)
- return ret;
-
- if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
- return -EINVAL;
-
- window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
window);
}
@@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- /* ampdu_ack_len = 0 marks no BA was received. In this case
- * treat it as a single frame loss as we don't want the success
- * ratio to dip too quickly because a BA wasn't received
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len,
+ reduced_txp);
+
+ /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+ * it as a single frame loss as we don't want the success ratio
+ * to dip too quickly because a BA wasn't received.
+ * For TPC, there's no need for this optimisation since we want
+ * to recover very quickly from a bad power reduction and
+ * therefore we'd like the success ratio to get an immediate hit
+ * when failing to get a BA, so we'd switch back to a lower or
+ * zero power reduction. When FW transmits agg with a rate
+ * different from the initial rate, it will not use reduced txp
+ * and will send BA notification twice (one empty with reduced
+ * txp equal to the value from LQ and one with reduced txp 0).
+ * We need to update counters for each txp level accordingly.
*/
if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1;
- rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len,
- reduced_txp);
+ rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
else
continue;
- rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
- 1, i < retries ? 0 : legacy_success,
- reduced_txp);
+ rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+ lq_rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
+ rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
+ lq_rate.index, 1,
+ i < retries ? 0 : legacy_success);
}
/* Update success/fail counts if not searching for new mode */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8bf48a7d0f4e..0914ec2fd574 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1029,7 +1029,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
- mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
}
@@ -1060,7 +1059,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
/* TODO: not accounted if the whole A-MPDU failed */
info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] =
- (void *)(uintptr_t)tid_data->reduced_tpc;
+ (void *)(uintptr_t)ba_notif->reduced_txp;
info->status.status_driver_data[1] =
(void *)(uintptr_t)tid_data->rate_n_flags;
}
@@ -1133,6 +1132,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
scd_flow, ba_resp_scd_ssn, ba_notif->txed,
ba_notif->txed_2_done);
+ IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+ ba_notif->reduced_txp);
tid_data->next_reclaimed = ba_resp_scd_ssn;
iwl_mvm_check_ratid_empty(mvm, sta, tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 6261a68cae90..00335ea6b3eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -378,7 +378,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
/* 3168 Series */
+ {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
/* 7265 Series */
@@ -475,6 +478,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c32889a1e39c..a28414c50edf 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
}
- if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2))
+ if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+ ETH_ALEN, data->addresses[1].addr))
goto nla_put_failure;
/* We get the skb->data */
@@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) {
- if (mac80211_hwsim_addr_match(data, addr)) {
+ if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
_found = true;
break;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 9a3966cd6fbe..155f343981fe 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -273,8 +273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+ rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 1a6740b4d396..2553cdd74066 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+ rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index d26018f30b7d..2d64611de300 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
+ rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 9733b31a780d..a26afcab03ed 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
!(filter_flags & FIF_PLCPFAIL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 3282ddb766f4..26427140a963 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -669,6 +669,7 @@ enum rt2x00_state_flags {
CONFIG_POWERSAVING,
CONFIG_HT_DISABLED,
CONFIG_QOS_DISABLED,
+ CONFIG_MONITORING,
/*
* Mark we currently are sequentially reading TX_STA_FIFO register
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 7e8bb1198ae9..6a1f508d472f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
else
clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
+ if (conf->flags & IEEE80211_CONF_MONITOR)
+ set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+ else
+ clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+
rt2x00dev->curr_band = conf->chandef.chan->band;
rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
rt2x00dev->tx_power = conf->power_level;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 3c26ee65a415..13da95a24cf7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
*total_flags |= FIF_PSPOLL;
}
- /*
- * Check if there is any work left for us.
- */
- if (rt2x00dev->packet_filter == *total_flags)
- return;
rt2x00dev->packet_filter = *total_flags;
rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index c0e730ea1b69..24a3436ef952 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
- rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 7081e13b4fd6..7bbc86931168 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
- rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
+ !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index a62bf0a65c32..5be34118e0af 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_WORLD_WIDE_13:
return &rtl_regdom_12_13;
case COUNTRY_CODE_MKK:
case COUNTRY_CODE_MKK1:
@@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13:
case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
return &rtl_regdom_12_13_5g_all;
default:
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d6abf191122a..96ccd4e943db 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
RING_IDX cons, prod;
unsigned short id;
struct sk_buff *skb;
+ bool more_to_do;
BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx.rsp_cons = prod;
- /*
- * Set a new event, then check for race with update of tx_cons.
- * Note that it is essential to schedule a callback, no matter
- * how few buffers are pending. Even if there is space in the
- * transmit ring, higher layers may be blocked because too much
- * data is outstanding: in such cases notification from Xen is
- * likely to be the only kick that we'll get.
- */
- queue->tx.sring->rsp_event =
- prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
- mb(); /* update shared area */
- } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
+ RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
+ } while (more_to_do);
xennet_maybe_wake_tx(queue);
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 8ebfcaae3f5a..9edf7eb7d17c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev,
device_lock(dev);
claim = ndns->claim;
- if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
- mode = "memory";
- else if (claim && is_nd_btt(claim))
+ if (claim && is_nd_btt(claim))
mode = "safe";
+ else if (claim && is_nd_pfn(claim))
+ mode = "memory";
+ else if (!claim && pmem_should_map_pages(dev))
+ mode = "memory";
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0cc9048b86e2..ae81a2f1da50 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM:
- break;
case PFN_MODE_PMEM:
- /* TODO: allocate from PMEM support */
- return -ENOTTY;
+ break;
default:
return -ENXIO;
}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 706e3ff67f8b..7ee21ae305ae 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -679,18 +679,6 @@ u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
return __of_msi_map_rid(dev, &msi_np, rid_in);
}
-static struct irq_domain *__of_get_msi_domain(struct device_node *np,
- enum irq_domain_bus_token token)
-{
- struct irq_domain *d;
-
- d = irq_find_matching_host(np, token);
- if (!d)
- d = irq_find_host(np);
-
- return d;
-}
-
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
* @dev: device for which the mapping is to be done.
@@ -706,7 +694,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
struct device_node *np = NULL;
__of_msi_map_rid(dev, &np, rid);
- return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI);
+ return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
}
/**
@@ -730,7 +718,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
/* Check for a single msi-parent property */
msi_np = of_parse_phandle(np, "msi-parent", 0);
if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
- d = __of_get_msi_domain(msi_np, token);
+ d = irq_find_matching_host(msi_np, token);
if (!d)
of_node_put(msi_np);
return d;
@@ -744,7 +732,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
while (!of_parse_phandle_with_args(np, "msi-parent",
"#msi-cells",
index, &args)) {
- d = __of_get_msi_domain(args.np, token);
+ d = irq_find_matching_host(args.np, token);
if (d)
return d;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 86829f8064a6..5648317d355f 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -143,11 +143,31 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
}
EXPORT_SYMBOL(of_mdio_parse_addr);
+/* The following is a list of PHY compatible strings which appear in
+ * some DTBs. The compatible string is never matched against a PHY
+ * driver, so is pointless. We only expect devices which are not PHYs
+ * to have a compatible string, so they can be matched to an MDIO
+ * driver. Encourage users to upgrade their DT blobs to remove these.
+ */
+static const struct of_device_id whitelist_phys[] = {
+ { .compatible = "brcm,40nm-ephy" },
+ { .compatible = "marvell,88E1111", },
+ { .compatible = "marvell,88e1116", },
+ { .compatible = "marvell,88e1118", },
+ { .compatible = "marvell,88e1149r", },
+ { .compatible = "marvell,88e1310", },
+ { .compatible = "marvell,88E1510", },
+ { .compatible = "marvell,88E1514", },
+ { .compatible = "moxa,moxart-rtl8201cp", },
+ {}
+};
+
/*
* Return true if the child node is for a phy. It must either:
* o Compatible string of "ethernet-phy-idX.X"
* o Compatible string of "ethernet-phy-ieee802.3-c45"
* o Compatible string of "ethernet-phy-ieee802.3-c22"
+ * o In the white list above (and issue a warning)
* o No compatibility string
*
* A device which is not a phy is expected to have a compatible string
@@ -166,6 +186,13 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22"))
return true;
+ if (of_match_node(whitelist_phys, child)) {
+ pr_warn(FW_WARN
+ "%s: Whitelisted compatible string. Please remove\n",
+ child->full_name);
+ return true;
+ }
+
if (!of_find_property(child, "compatible", NULL))
return true;
@@ -256,11 +283,19 @@ static int of_phy_match(struct device *dev, void *phy_np)
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
struct device *d;
+ struct mdio_device *mdiodev;
+
if (!phy_np)
return NULL;
d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
- return d ? to_phy_device(d) : NULL;
+ if (d) {
+ mdiodev = to_mdio_device(d);
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(d);
+ }
+
+ return NULL;
}
EXPORT_SYMBOL(of_phy_find_device);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5f2fda12e006..fa49f9143b80 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
pci_lock_rescan_remove();
- if (slot->flags & SLOT_IS_GOING_AWAY)
+ if (slot->flags & SLOT_IS_GOING_AWAY) {
+ pci_unlock_rescan_remove();
return -ENODEV;
+ }
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f700723ca5d6..d28e3ab9479c 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -342,6 +342,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
/* Device IDs of parts that have 32KB MCH space */
static const unsigned int mch_quirk_devices[] = {
0x0154, /* Ivy Bridge */
+ 0x0a04, /* Haswell-ULT */
0x0c00, /* Haswell */
0x1604, /* Broadwell */
};
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 934c139916c6..ee4f183ef9ee 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -178,7 +178,6 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
unsigned long flags;
struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
struct ixp46x_ts_regs *regs = ixp_clock->regs;
@@ -189,8 +188,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
spin_unlock_irqrestore(&register_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
@@ -202,8 +200,7 @@ static int ptp_ixp_settime(struct ptp_clock_info *ptp,
struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
struct ixp46x_ts_regs *regs = ixp_clock->regs;
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&register_lock, flags);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index c692dfebd0ba..50597f9522fe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
- if (!chp->cmg_chars)
+ if (chp->cmg == -1)
return 0;
- return memory_read_from_buffer(buf, count, &off,
- chp->cmg_chars, sizeof(struct cmg_chars));
+ return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+ sizeof(chp->cmg_chars));
}
static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
* chp_update_desc - update channel-path description
* @chp - channel-path
*
- * Update the channel-path description of the specified channel-path.
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
* Return zero on success, non-zero otherwise.
*/
int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
return rc;
rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ if (rc)
+ return rc;
- return rc;
+ return chsc_get_channel_measurement_chars(chp);
}
/**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
ret = -ENODEV;
goto out_free;
}
- /* Get channel-measurement characteristics. */
- if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
- ret = chsc_get_channel_measurement_chars(chp);
- if (ret)
- goto out_free;
- } else {
- chp->cmg = -1;
- }
dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 4efd5b867cc3..af0232290dc4 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -48,7 +48,7 @@ struct channel_path {
/* Channel-measurement related stuff: */
int cmg;
int shared;
- void *cmg_chars;
+ struct cmg_chars cmg_chars;
};
/* Return channel_path struct for given chpid. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a831d18596a5..c424c0c7367e 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
void chsc_chp_offline(struct chp_id chpid)
{
- char dbf_txt[15];
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
+ char dbf_txt[15];
sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
void chsc_chp_online(struct chp_id chpid)
{
- char dbf_txt[15];
+ struct channel_path *chp = chpid_to_chp(chpid);
struct chp_link link;
+ char dbf_txt[15];
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- struct cmg_chars *cmg_chars;
int i, mask;
- cmg_chars = chp->cmg_chars;
for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
mask = 0x80 >> (i + 3);
if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
+ chp->cmg_chars.values[i] = chars->values[i];
else
- cmg_chars->values[i] = 0;
+ chp->cmg_chars.values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
- struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- chp->cmg_chars = NULL;
- cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
- if (!cmg_chars)
- return -ENOMEM;
+ chp->shared = -1;
+ chp->cmg = -1;
+
+ if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+ return 0;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
scmc_area->response.code);
goto out;
}
- if (scmc_area->not_valid) {
- chp->cmg = -1;
- chp->shared = -1;
+ if (scmc_area->not_valid)
goto out;
- }
+
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
if (chp->cmg != 2 && chp->cmg != 3) {
/* No cmg-dependent data. */
goto out;
}
- chp->cmg_chars = cmg_chars;
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
spin_unlock_irq(&chsc_page_lock);
- if (!chp->cmg_chars)
- kfree(cmg_chars);
-
return ret;
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 7b23f43c7b08..de1b6c1d172c 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -112,9 +112,10 @@ static inline int convert_error(struct zcrypt_device *zdev,
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+ ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
@@ -123,16 +124,18 @@ static inline int convert_error(struct zcrypt_device *zdev,
atomic_set(&zcrypt_rescan_req, 1);
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+ ehdr->reply_code);
return -EAGAIN;
default:
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+ ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 74edf2934e7c..eedfaa2cf715 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -336,9 +336,10 @@ static int convert_type80(struct zcrypt_device *zdev,
/* The result is too short, the CEX2A card may not do that.. */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid, zdev->online, t80h->code);
+ AP_QID_DEVICE(zdev->ap_dev->qid),
+ zdev->online, t80h->code);
return -EAGAIN; /* repeat the request on a different device. */
}
@@ -368,9 +369,9 @@ static int convert_response(struct zcrypt_device *zdev,
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- zdev->ap_dev->qid, zdev->online);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 9a2dd472c1cc..21959719daef 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -572,9 +572,9 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
return -EINVAL;
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
- zdev->ap_dev->qid, zdev->online,
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
msg->hdr.reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
@@ -715,9 +715,9 @@ static int convert_response_ica(struct zcrypt_device *zdev,
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- zdev->ap_dev->qid, zdev->online);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -747,9 +747,9 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- zdev->ap_dev->qid, zdev->online);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -773,9 +773,9 @@ static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- zdev->ap_dev->qid, zdev->online);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
@@ -800,9 +800,9 @@ static int convert_response_rng(struct zcrypt_device *zdev,
default: /* Unknown response type, this should NEVER EVER happen */
zdev->online = 0;
pr_err("Cryptographic device %x failed and was set offline\n",
- zdev->ap_dev->qid);
+ AP_QID_DEVICE(zdev->ap_dev->qid));
ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
- zdev->ap_dev->qid, zdev->online);
+ AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
return -EAGAIN; /* repeat the request on a different device. */
}
}
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 37a0c7156087..b67661836c9f 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,5 +1,7 @@
config SCSI_HISI_SAS
tristate "HiSilicon SAS"
+ depends on HAS_DMA
+ depends on ARM64 || COMPILE_TEST
select SCSI_SAS_LIBSAS
select BLK_DEV_INTEGRITY
help
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4e08d1cd704d..bb669d32ccd0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
rw_max = q->limits.io_opt =
- logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ sdkp->opt_xfer_blocks * sdp->sector_size;
else
rw_max = BLK_DEF_MAX_SECTORS;
@@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
- if (!sdkp)
- return 0; /* this can happen */
+ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
+ return 0;
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+
if (!sdkp->device->manage_start_stop)
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 503ab8b46c0b..5e820674432c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8bd54a64efd6..64c867405ad4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ if (!cd) /* E.g.: runtime suspend following sr_remove() */
+ return 0;
+
if (cd->media_present)
return -EBUSY;
else
@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
+ dev_set_drvdata(dev, NULL);
mutex_lock(&sr_ref_mutex);
kref_put(&cd->kref, sr_kref_release);
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f8472a81..ecc73c0a97cf 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
/* Bitfields in CNTL1 */
#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
-#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080
-#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
+#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c009d2..7cb0c1921495 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
/* SPCOM register values */
#define SPCOM_CS(x) ((x) << 30)
#define SPCOM_TRANLEN(x) ((x) << 0)
-#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */
+#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
#define AUTOSUSPEND_TIMEOUT 2000
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
reinit_completion(&mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
- if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
+ if (t->len > SPCOM_TRANLEN_MAX) {
dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
" beyond the SPCOM[TRANLEN] field\n", t->len);
return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33cb64f9..6a4ff27f4357 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,7 +929,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx)
- goto no_dma;
+ goto tx_nodma;
desc_tx->callback = spi_imx_dma_tx_callback;
desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
- goto no_dma;
+ goto rx_nodma;
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
return ret;
-no_dma:
+rx_nodma:
+ dmaengine_terminate_all(master->dma_tx);
+tx_nodma:
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
dev_driver_string(&master->dev),
dev_name(&master->dev));
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f687b0..cf4bb36bee25 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
test.iterate_transfer_mask = 1;
/* count number of transfers with tx/rx_buf != NULL */
+ rx_count = tx_count = 0;
for (i = 0; i < test.transfer_count; i++) {
if (test.transfers[i].tx_buf)
tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820275e9..0caa3c8bef46 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
return status;
disable_pm:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
spi_master_put(master);
@@ -1501,6 +1503,7 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index cde5ff7529eb..d1a750760cf3 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -613,9 +613,10 @@ out:
return err;
}
-static int ssb_bus_register(struct ssb_bus *bus,
- ssb_invariants_func_t get_invariants,
- unsigned long baseaddr)
+static int __maybe_unused
+ssb_bus_register(struct ssb_bus *bus,
+ ssb_invariants_func_t get_invariants,
+ unsigned long baseaddr)
{
int err;
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 58d4517e1836..b9519be90fda 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -6,6 +6,7 @@ menu "Analog to digital converters"
config AD7606
tristate "Analog Devices AD7606 ADC driver"
depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index f129039bece3..69287108f793 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -217,8 +217,12 @@ error_ret:
static int ade7753_reset(struct device *dev)
{
u16 val;
+ int ret;
+
+ ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+ if (ret)
+ return ret;
- ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
val |= BIT(6); /* Software Chip Reset */
return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
@@ -343,8 +347,12 @@ error_ret:
static int ade7753_stop_device(struct device *dev)
{
u16 val;
+ int ret;
+
+ ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+ if (ret)
+ return ret;
- ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
val |= BIT(4); /* AD converters can be turned off */
return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 79ac19246548..70b8f4fabfad 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
lcd_send_serial(0x1F); /* R/W=W, RS=0 */
lcd_send_serial(cmd & 0x0F);
lcd_send_serial((cmd >> 4) & 0x0F);
- /* the shortest command takes at least 40 us */
- usleep_range(40, 100);
+ udelay(40); /* the shortest command takes at least 40 us */
spin_unlock_irq(&pprt_lock);
}
@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(data & 0x0F);
lcd_send_serial((data >> 4) & 0x0F);
- /* the shortest data takes at least 40 us */
- usleep_range(40, 100);
+ udelay(40); /* the shortest data takes at least 40 us */
spin_unlock_irq(&pprt_lock);
}
@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
spin_lock_irq(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, cmd);
- /* maintain the data during 20 us before the strobe */
- usleep_range(20, 100);
+ udelay(20); /* maintain the data during 20 us before the strobe */
bits.e = BIT_SET;
bits.rs = BIT_CLR;
bits.rw = BIT_CLR;
set_ctrl_bits();
- usleep_range(40, 100); /* maintain the strobe during 40 us */
+ udelay(40); /* maintain the strobe during 40 us */
bits.e = BIT_CLR;
set_ctrl_bits();
- usleep_range(120, 500); /* the shortest command takes at least 120 us */
+ udelay(120); /* the shortest command takes at least 120 us */
spin_unlock_irq(&pprt_lock);
}
@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
spin_lock_irq(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, data);
- /* maintain the data during 20 us before the strobe */
- usleep_range(20, 100);
+ udelay(20); /* maintain the data during 20 us before the strobe */
bits.e = BIT_SET;
bits.rs = BIT_SET;
bits.rw = BIT_CLR;
set_ctrl_bits();
- usleep_range(40, 100); /* maintain the strobe during 40 us */
+ udelay(40); /* maintain the strobe during 40 us */
bits.e = BIT_CLR;
set_ctrl_bits();
- usleep_range(45, 100); /* the shortest data takes at least 45 us */
+ udelay(45); /* the shortest data takes at least 45 us */
spin_unlock_irq(&pprt_lock);
}
@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
spin_lock_irq(&pprt_lock);
/* present the data to the control port */
w_ctr(pprt, cmd);
- usleep_range(60, 120);
+ udelay(60);
spin_unlock_irq(&pprt_lock);
}
@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
spin_lock_irq(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, data);
- usleep_range(60, 120);
+ udelay(60);
spin_unlock_irq(&pprt_lock);
}
@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(' ' & 0x0F);
lcd_send_serial((' ' >> 4) & 0x0F);
- usleep_range(40, 100); /* the shortest data takes at least 40 us */
+ udelay(40); /* the shortest data takes at least 40 us */
}
spin_unlock_irq(&pprt_lock);
@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
w_dtr(pprt, ' ');
/* maintain the data during 20 us before the strobe */
- usleep_range(20, 100);
+ udelay(20);
bits.e = BIT_SET;
bits.rs = BIT_SET;
@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
set_ctrl_bits();
/* maintain the strobe during 40 us */
- usleep_range(40, 100);
+ udelay(40);
bits.e = BIT_CLR;
set_ctrl_bits();
/* the shortest data takes at least 45 us */
- usleep_range(45, 100);
+ udelay(45);
}
spin_unlock_irq(&pprt_lock);
@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
- usleep_range(60, 120);
+ udelay(60);
}
spin_unlock_irq(&pprt_lock);
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index ba8765063174..f1f3ecadf0fb 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -22,12 +22,6 @@ menuconfig STAGING_RDMA
# Please keep entries in alphabetic order
if STAGING_RDMA
-source "drivers/staging/rdma/amso1100/Kconfig"
-
-source "drivers/staging/rdma/ehca/Kconfig"
-
source "drivers/staging/rdma/hfi1/Kconfig"
-source "drivers/staging/rdma/ipath/Kconfig"
-
endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index 139d78ef2c24..8c7fc1de48a7 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,5 +1,2 @@
# Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
-obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
-obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/staging/rdma/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
deleted file mode 100644
index 950dfabcd89d..000000000000
--- a/drivers/staging/rdma/amso1100/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
-
-iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
- c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/staging/rdma/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
deleted file mode 100644
index e6ce5f209e47..000000000000
--- a/drivers/staging/rdma/amso1100/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config INFINIBAND_AMSO1100
- tristate "Ammasso 1100 HCA support"
- depends on PCI && INET
- ---help---
- This is a low-level driver for the Ammasso 1100 host
- channel adapter (HCA).
-
-config INFINIBAND_AMSO1100_DEBUG
- bool "Verbose debugging output"
- depends on INFINIBAND_AMSO1100
- default n
- ---help---
- This option causes the amso1100 driver to produce a bunch of
- debug messages. Select this if you are developing the driver
- or trying to diagnose a problem.
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
deleted file mode 100644
index 18b00a5cb549..000000000000
--- a/drivers/staging/rdma/amso1100/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-7/2015
-
-The amso1100 driver has been deprecated and moved to drivers/staging.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
deleted file mode 100644
index b46ebd1ae15a..000000000000
--- a/drivers/staging/rdma/amso1100/c2.c
+++ /dev/null
@@ -1,1240 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_provider.h"
-
-MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
-MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
- | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int debug = -1; /* defaults above */
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static int c2_up(struct net_device *netdev);
-static int c2_down(struct net_device *netdev);
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-static void c2_tx_interrupt(struct net_device *netdev);
-static void c2_rx_interrupt(struct net_device *netdev);
-static irqreturn_t c2_interrupt(int irq, void *dev_id);
-static void c2_tx_timeout(struct net_device *netdev);
-static int c2_change_mtu(struct net_device *netdev, int new_mtu);
-static void c2_reset(struct c2_port *c2_port);
-
-static struct pci_device_id c2_pci_table[] = {
- { PCI_DEVICE(0x18b8, 0xb001) },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, c2_pci_table);
-
-static void c2_set_rxbufsize(struct c2_port *c2_port)
-{
- struct net_device *netdev = c2_port->netdev;
-
- if (netdev->mtu > RX_BUF_SIZE)
- c2_port->rx_buf_size =
- netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
- NET_IP_ALIGN;
- else
- c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
-}
-
-/*
- * Allocate TX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
- dma_addr_t base, void __iomem * mmio_txp_ring)
-{
- struct c2_tx_desc *tx_desc;
- struct c2_txp_desc __iomem *txp_desc;
- struct c2_element *elem;
- int i;
-
- tx_ring->start = kmalloc_array(tx_ring->count, sizeof(*elem),
- GFP_KERNEL);
- if (!tx_ring->start)
- return -ENOMEM;
-
- elem = tx_ring->start;
- tx_desc = vaddr;
- txp_desc = mmio_txp_ring;
- for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
- tx_desc->len = 0;
- tx_desc->status = 0;
-
- /* Set TXP_HTXD_UNINIT */
- __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
- (void __iomem *) txp_desc + C2_TXP_ADDR);
- __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
- (void __iomem *) txp_desc + C2_TXP_FLAGS);
-
- elem->skb = NULL;
- elem->ht_desc = tx_desc;
- elem->hw_desc = txp_desc;
-
- if (i == tx_ring->count - 1) {
- elem->next = tx_ring->start;
- tx_desc->next_offset = base;
- } else {
- elem->next = elem + 1;
- tx_desc->next_offset =
- base + (i + 1) * sizeof(*tx_desc);
- }
- }
-
- tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
-
- return 0;
-}
-
-/*
- * Allocate RX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
- dma_addr_t base, void __iomem * mmio_rxp_ring)
-{
- struct c2_rx_desc *rx_desc;
- struct c2_rxp_desc __iomem *rxp_desc;
- struct c2_element *elem;
- int i;
-
- rx_ring->start = kmalloc_array(rx_ring->count, sizeof(*elem),
- GFP_KERNEL);
- if (!rx_ring->start)
- return -ENOMEM;
-
- elem = rx_ring->start;
- rx_desc = vaddr;
- rxp_desc = mmio_rxp_ring;
- for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
- rx_desc->len = 0;
- rx_desc->status = 0;
-
- /* Set RXP_HRXD_UNINIT */
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
- (void __iomem *) rxp_desc + C2_RXP_STATUS);
- __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
- __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
- (void __iomem *) rxp_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
- (void __iomem *) rxp_desc + C2_RXP_FLAGS);
-
- elem->skb = NULL;
- elem->ht_desc = rx_desc;
- elem->hw_desc = rxp_desc;
-
- if (i == rx_ring->count - 1) {
- elem->next = rx_ring->start;
- rx_desc->next_offset = base;
- } else {
- elem->next = elem + 1;
- rx_desc->next_offset =
- base + (i + 1) * sizeof(*rx_desc);
- }
- }
-
- rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
-
- return 0;
-}
-
-/* Setup buffer for receiving */
-static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
-{
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_rx_desc *rx_desc = elem->ht_desc;
- struct sk_buff *skb;
- dma_addr_t mapaddr;
- u32 maplen;
- struct c2_rxp_hdr *rxp_hdr;
-
- skb = dev_alloc_skb(c2_port->rx_buf_size);
- if (unlikely(!skb)) {
- pr_debug("%s: out of memory for receive\n",
- c2_port->netdev->name);
- return -ENOMEM;
- }
-
- /* Zero out the rxp hdr in the sk_buff */
- memset(skb->data, 0, sizeof(*rxp_hdr));
-
- skb->dev = c2_port->netdev;
-
- maplen = c2_port->rx_buf_size;
- mapaddr =
- pci_map_single(c2dev->pcidev, skb->data, maplen,
- PCI_DMA_FROMDEVICE);
-
- /* Set the sk_buff RXP_header to RXP_HRXD_READY */
- rxp_hdr = (struct c2_rxp_hdr *) skb->data;
- rxp_hdr->flags = RXP_HRXD_READY;
-
- __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
- __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
- elem->hw_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
- elem->hw_desc + C2_RXP_FLAGS);
-
- elem->skb = skb;
- elem->mapaddr = mapaddr;
- elem->maplen = maplen;
- rx_desc->len = maplen;
-
- return 0;
-}
-
-/*
- * Allocate buffers for the Rx ring
- * For receive: rx_ring.to_clean is next received frame
- */
-static int c2_rx_fill(struct c2_port *c2_port)
-{
- struct c2_ring *rx_ring = &c2_port->rx_ring;
- struct c2_element *elem;
- int ret = 0;
-
- elem = rx_ring->start;
- do {
- if (c2_rx_alloc(c2_port, elem)) {
- ret = 1;
- break;
- }
- } while ((elem = elem->next) != rx_ring->start);
-
- rx_ring->to_clean = rx_ring->start;
- return ret;
-}
-
-/* Free all buffers in RX ring, assumes receiver stopped */
-static void c2_rx_clean(struct c2_port *c2_port)
-{
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *rx_ring = &c2_port->rx_ring;
- struct c2_element *elem;
- struct c2_rx_desc *rx_desc;
-
- elem = rx_ring->start;
- do {
- rx_desc = elem->ht_desc;
- rx_desc->len = 0;
-
- __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
- __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
- __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
- elem->hw_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
- elem->hw_desc + C2_RXP_FLAGS);
-
- if (elem->skb) {
- pci_unmap_single(c2dev->pcidev, elem->mapaddr,
- elem->maplen, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(elem->skb);
- elem->skb = NULL;
- }
- } while ((elem = elem->next) != rx_ring->start);
-}
-
-static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
-{
- struct c2_tx_desc *tx_desc = elem->ht_desc;
-
- tx_desc->len = 0;
-
- pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
- PCI_DMA_TODEVICE);
-
- if (elem->skb) {
- dev_kfree_skb_any(elem->skb);
- elem->skb = NULL;
- }
-
- return 0;
-}
-
-/* Free all buffers in TX ring, assumes transmitter stopped */
-static void c2_tx_clean(struct c2_port *c2_port)
-{
- struct c2_ring *tx_ring = &c2_port->tx_ring;
- struct c2_element *elem;
- struct c2_txp_desc txp_htxd;
- int retry;
- unsigned long flags;
-
- spin_lock_irqsave(&c2_port->tx_lock, flags);
-
- elem = tx_ring->start;
-
- do {
- retry = 0;
- do {
- txp_htxd.flags =
- readw(elem->hw_desc + C2_TXP_FLAGS);
-
- if (txp_htxd.flags == TXP_HTXD_READY) {
- retry = 1;
- __raw_writew(0,
- elem->hw_desc + C2_TXP_LEN);
- __raw_writeq(0,
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
- elem->hw_desc + C2_TXP_FLAGS);
- c2_port->netdev->stats.tx_dropped++;
- break;
- } else {
- __raw_writew(0,
- elem->hw_desc + C2_TXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
- elem->hw_desc + C2_TXP_FLAGS);
- }
-
- c2_tx_free(c2_port->c2dev, elem);
-
- } while ((elem = elem->next) != tx_ring->start);
- } while (retry);
-
- c2_port->tx_avail = c2_port->tx_ring.count - 1;
- c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
-
- if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
- netif_wake_queue(c2_port->netdev);
-
- spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-}
-
-/*
- * Process transmit descriptors marked 'DONE' by the firmware,
- * freeing up their unneeded sk_buffs.
- */
-static void c2_tx_interrupt(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *tx_ring = &c2_port->tx_ring;
- struct c2_element *elem;
- struct c2_txp_desc txp_htxd;
-
- spin_lock(&c2_port->tx_lock);
-
- for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
- elem = elem->next) {
- txp_htxd.flags =
- be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
-
- if (txp_htxd.flags != TXP_HTXD_DONE)
- break;
-
- if (netif_msg_tx_done(c2_port)) {
- /* PCI reads are expensive in fast path */
- txp_htxd.len =
- be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
- pr_debug("%s: tx done slot %3Zu status 0x%x len "
- "%5u bytes\n",
- netdev->name, elem - tx_ring->start,
- txp_htxd.flags, txp_htxd.len);
- }
-
- c2_tx_free(c2dev, elem);
- ++(c2_port->tx_avail);
- }
-
- tx_ring->to_clean = elem;
-
- if (netif_queue_stopped(netdev)
- && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
- netif_wake_queue(netdev);
-
- spin_unlock(&c2_port->tx_lock);
-}
-
-static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
-{
- struct c2_rx_desc *rx_desc = elem->ht_desc;
- struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-
- if (rxp_hdr->status != RXP_HRXD_OK ||
- rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
- pr_debug("BAD RXP_HRXD\n");
- pr_debug(" rx_desc : %p\n", rx_desc);
- pr_debug(" index : %Zu\n",
- elem - c2_port->rx_ring.start);
- pr_debug(" len : %u\n", rx_desc->len);
- pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr,
- (void *) __pa((unsigned long) rxp_hdr));
- pr_debug(" flags : 0x%x\n", rxp_hdr->flags);
- pr_debug(" status: 0x%x\n", rxp_hdr->status);
- pr_debug(" len : %u\n", rxp_hdr->len);
- pr_debug(" rsvd : 0x%x\n", rxp_hdr->rsvd);
- }
-
- /* Setup the skb for reuse since we're dropping this pkt */
- elem->skb->data = elem->skb->head;
- skb_reset_tail_pointer(elem->skb);
-
- /* Zero out the rxp hdr in the sk_buff */
- memset(elem->skb->data, 0, sizeof(*rxp_hdr));
-
- /* Write the descriptor to the adapter's rx ring */
- __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
- __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
- __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
- elem->hw_desc + C2_RXP_LEN);
- __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
- elem->hw_desc + C2_RXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
- elem->hw_desc + C2_RXP_FLAGS);
-
- pr_debug("packet dropped\n");
- c2_port->netdev->stats.rx_dropped++;
-}
-
-static void c2_rx_interrupt(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *rx_ring = &c2_port->rx_ring;
- struct c2_element *elem;
- struct c2_rx_desc *rx_desc;
- struct c2_rxp_hdr *rxp_hdr;
- struct sk_buff *skb;
- dma_addr_t mapaddr;
- u32 maplen, buflen;
- unsigned long flags;
-
- spin_lock_irqsave(&c2dev->lock, flags);
-
- /* Begin where we left off */
- rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
-
- for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
- elem = elem->next) {
- rx_desc = elem->ht_desc;
- mapaddr = elem->mapaddr;
- maplen = elem->maplen;
- skb = elem->skb;
- rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-
- if (rxp_hdr->flags != RXP_HRXD_DONE)
- break;
- buflen = rxp_hdr->len;
-
- /* Sanity check the RXP header */
- if (rxp_hdr->status != RXP_HRXD_OK ||
- buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
- c2_rx_error(c2_port, elem);
- continue;
- }
-
- /*
- * Allocate and map a new skb for replenishing the host
- * RX desc
- */
- if (c2_rx_alloc(c2_port, elem)) {
- c2_rx_error(c2_port, elem);
- continue;
- }
-
- /* Unmap the old skb */
- pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
- PCI_DMA_FROMDEVICE);
-
- prefetch(skb->data);
-
- /*
- * Skip past the leading 8 bytes comprising of the
- * "struct c2_rxp_hdr", prepended by the adapter
- * to the usual Ethernet header ("struct ethhdr"),
- * to the start of the raw Ethernet packet.
- *
- * Fix up the various fields in the sk_buff before
- * passing it up to netif_rx(). The transfer size
- * (in bytes) specified by the adapter len field of
- * the "struct rxp_hdr_t" does NOT include the
- * "sizeof(struct c2_rxp_hdr)".
- */
- skb->data += sizeof(*rxp_hdr);
- skb_set_tail_pointer(skb, buflen);
- skb->len = buflen;
- skb->protocol = eth_type_trans(skb, netdev);
-
- netif_rx(skb);
-
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += buflen;
- }
-
- /* Save where we left off */
- rx_ring->to_clean = elem;
- c2dev->cur_rx = elem - rx_ring->start;
- C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
- spin_unlock_irqrestore(&c2dev->lock, flags);
-}
-
-/*
- * Handle netisr0 TX & RX interrupts.
- */
-static irqreturn_t c2_interrupt(int irq, void *dev_id)
-{
- unsigned int netisr0, dmaisr;
- int handled = 0;
- struct c2_dev *c2dev = dev_id;
-
- /* Process CCILNET interrupts */
- netisr0 = readl(c2dev->regs + C2_NISR0);
- if (netisr0) {
-
- /*
- * There is an issue with the firmware that always
- * provides the status of RX for both TX & RX
- * interrupts. So process both queues here.
- */
- c2_rx_interrupt(c2dev->netdev);
- c2_tx_interrupt(c2dev->netdev);
-
- /* Clear the interrupt */
- writel(netisr0, c2dev->regs + C2_NISR0);
- handled++;
- }
-
- /* Process RNIC interrupts */
- dmaisr = readl(c2dev->regs + C2_DISR);
- if (dmaisr) {
- writel(dmaisr, c2dev->regs + C2_DISR);
- c2_rnic_interrupt(c2dev);
- handled++;
- }
-
- if (handled) {
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
-
-static int c2_up(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_element *elem;
- struct c2_rxp_hdr *rxp_hdr;
- struct in_device *in_dev;
- size_t rx_size, tx_size;
- int ret, i;
- unsigned int netimr0;
-
- if (netif_msg_ifup(c2_port))
- pr_debug("%s: enabling interface\n", netdev->name);
-
- /* Set the Rx buffer size based on MTU */
- c2_set_rxbufsize(c2_port);
-
- /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
- rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
- tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
-
- c2_port->mem_size = tx_size + rx_size;
- c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
- &c2_port->dma);
- if (c2_port->mem == NULL) {
- pr_debug("Unable to allocate memory for "
- "host descriptor rings\n");
- return -ENOMEM;
- }
-
- /* Create the Rx host descriptor ring */
- if ((ret =
- c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
- c2dev->mmio_rxp_ring))) {
- pr_debug("Unable to create RX ring\n");
- goto bail0;
- }
-
- /* Allocate Rx buffers for the host descriptor ring */
- if (c2_rx_fill(c2_port)) {
- pr_debug("Unable to fill RX ring\n");
- goto bail1;
- }
-
- /* Create the Tx host descriptor ring */
- if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
- c2_port->dma + rx_size,
- c2dev->mmio_txp_ring))) {
- pr_debug("Unable to create TX ring\n");
- goto bail1;
- }
-
- /* Set the TX pointer to where we left off */
- c2_port->tx_avail = c2_port->tx_ring.count - 1;
- c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
- c2_port->tx_ring.start + c2dev->cur_tx;
-
- /* missing: Initialize MAC */
-
- BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
-
- /* Reset the adapter, ensures the driver is in sync with the RXP */
- c2_reset(c2_port);
-
- /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
- for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
- i++, elem++) {
- rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
- rxp_hdr->flags = 0;
- __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
- elem->hw_desc + C2_RXP_FLAGS);
- }
-
- /* Enable network packets */
- netif_start_queue(netdev);
-
- /* Enable IRQ */
- writel(0, c2dev->regs + C2_IDIS);
- netimr0 = readl(c2dev->regs + C2_NIMR0);
- netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
- writel(netimr0, c2dev->regs + C2_NIMR0);
-
- /* Tell the stack to ignore arp requests for ipaddrs bound to
- * other interfaces. This is needed to prevent the host stack
- * from responding to arp requests to the ipaddr bound on the
- * rdma interface.
- */
- in_dev = in_dev_get(netdev);
- IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
- in_dev_put(in_dev);
-
- return 0;
-
-bail1:
- c2_rx_clean(c2_port);
- kfree(c2_port->rx_ring.start);
-
-bail0:
- pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
- c2_port->dma);
-
- return ret;
-}
-
-static int c2_down(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
-
- if (netif_msg_ifdown(c2_port))
- pr_debug("%s: disabling interface\n",
- netdev->name);
-
- /* Wait for all the queued packets to get sent */
- c2_tx_interrupt(netdev);
-
- /* Disable network packets */
- netif_stop_queue(netdev);
-
- /* Disable IRQs by clearing the interrupt mask */
- writel(1, c2dev->regs + C2_IDIS);
- writel(0, c2dev->regs + C2_NIMR0);
-
- /* missing: Stop transmitter */
-
- /* missing: Stop receiver */
-
- /* Reset the adapter, ensures the driver is in sync with the RXP */
- c2_reset(c2_port);
-
- /* missing: Turn off LEDs here */
-
- /* Free all buffers in the host descriptor rings */
- c2_tx_clean(c2_port);
- c2_rx_clean(c2_port);
-
- /* Free the host descriptor rings */
- kfree(c2_port->rx_ring.start);
- kfree(c2_port->tx_ring.start);
- pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
- c2_port->dma);
-
- return 0;
-}
-
-static void c2_reset(struct c2_port *c2_port)
-{
- struct c2_dev *c2dev = c2_port->c2dev;
- unsigned int cur_rx = c2dev->cur_rx;
-
- /* Tell the hardware to quiesce */
- C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
-
- /*
- * The hardware will reset the C2_PCI_HRX_QUI bit once
- * the RXP is quiesced. Wait 2 seconds for this.
- */
- ssleep(2);
-
- cur_rx = C2_GET_CUR_RX(c2dev);
-
- if (cur_rx & C2_PCI_HRX_QUI)
- pr_debug("c2_reset: failed to quiesce the hardware!\n");
-
- cur_rx &= ~C2_PCI_HRX_QUI;
-
- c2dev->cur_rx = cur_rx;
-
- pr_debug("Current RX: %u\n", c2dev->cur_rx);
-}
-
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
- struct c2_dev *c2dev = c2_port->c2dev;
- struct c2_ring *tx_ring = &c2_port->tx_ring;
- struct c2_element *elem;
- dma_addr_t mapaddr;
- u32 maplen;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&c2_port->tx_lock, flags);
-
- if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
- pr_debug("%s: Tx ring full when queue awake!\n",
- netdev->name);
- return NETDEV_TX_BUSY;
- }
-
- maplen = skb_headlen(skb);
- mapaddr =
- pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
-
- elem = tx_ring->to_use;
- elem->skb = skb;
- elem->mapaddr = mapaddr;
- elem->maplen = maplen;
-
- /* Tell HW to xmit */
- __raw_writeq((__force u64) cpu_to_be64(mapaddr),
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(maplen),
- elem->hw_desc + C2_TXP_LEN);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
- elem->hw_desc + C2_TXP_FLAGS);
-
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += maplen;
-
- /* Loop thru additional data fragments and queue them */
- if (skb_shinfo(skb)->nr_frags) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- maplen = skb_frag_size(frag);
- mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
- 0, maplen, DMA_TO_DEVICE);
- elem = elem->next;
- elem->skb = NULL;
- elem->mapaddr = mapaddr;
- elem->maplen = maplen;
-
- /* Tell HW to xmit */
- __raw_writeq((__force u64) cpu_to_be64(mapaddr),
- elem->hw_desc + C2_TXP_ADDR);
- __raw_writew((__force u16) cpu_to_be16(maplen),
- elem->hw_desc + C2_TXP_LEN);
- __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
- elem->hw_desc + C2_TXP_FLAGS);
-
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += maplen;
- }
- }
-
- tx_ring->to_use = elem->next;
- c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
-
- if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
- netif_stop_queue(netdev);
- if (netif_msg_tx_queued(c2_port))
- pr_debug("%s: transmit queue full\n",
- netdev->name);
- }
-
- spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
- netdev->trans_start = jiffies;
-
- return NETDEV_TX_OK;
-}
-
-static void c2_tx_timeout(struct net_device *netdev)
-{
- struct c2_port *c2_port = netdev_priv(netdev);
-
- if (netif_msg_timer(c2_port))
- pr_debug("%s: tx timeout\n", netdev->name);
-
- c2_tx_clean(c2_port);
-}
-
-static int c2_change_mtu(struct net_device *netdev, int new_mtu)
-{
- int ret = 0;
-
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
- return -EINVAL;
-
- netdev->mtu = new_mtu;
-
- if (netif_running(netdev)) {
- c2_down(netdev);
-
- c2_up(netdev);
- }
-
- return ret;
-}
-
-static const struct net_device_ops c2_netdev = {
- .ndo_open = c2_up,
- .ndo_stop = c2_down,
- .ndo_start_xmit = c2_xmit_frame,
- .ndo_tx_timeout = c2_tx_timeout,
- .ndo_change_mtu = c2_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* Initialize network device */
-static struct net_device *c2_devinit(struct c2_dev *c2dev,
- void __iomem * mmio_addr)
-{
- struct c2_port *c2_port = NULL;
- struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
-
- if (!netdev) {
- pr_debug("c2_port etherdev alloc failed");
- return NULL;
- }
-
- SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
- netdev->netdev_ops = &c2_netdev;
- netdev->watchdog_timeo = C2_TX_TIMEOUT;
- netdev->irq = c2dev->pcidev->irq;
-
- c2_port = netdev_priv(netdev);
- c2_port->netdev = netdev;
- c2_port->c2dev = c2dev;
- c2_port->msg_enable = netif_msg_init(debug, default_msg);
- c2_port->tx_ring.count = C2_NUM_TX_DESC;
- c2_port->rx_ring.count = C2_NUM_RX_DESC;
-
- spin_lock_init(&c2_port->tx_lock);
-
- /* Copy our 48-bit ethernet hardware address */
- memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
-
- /* Validate the MAC address */
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- pr_debug("Invalid MAC Address\n");
- pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
- netdev->dev_addr, netdev->irq);
- free_netdev(netdev);
- return NULL;
- }
-
- c2dev->netdev = netdev;
-
- return netdev;
-}
-
-static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
-{
- int ret = 0, i;
- unsigned long reg0_start, reg0_flags, reg0_len;
- unsigned long reg2_start, reg2_flags, reg2_len;
- unsigned long reg4_start, reg4_flags, reg4_len;
- unsigned kva_map_size;
- struct net_device *netdev = NULL;
- struct c2_dev *c2dev = NULL;
- void __iomem *mmio_regs = NULL;
-
- printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
- DRV_VERSION);
-
- /* Enable PCI device */
- ret = pci_enable_device(pcidev);
- if (ret) {
- printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
- pci_name(pcidev));
- goto bail0;
- }
-
- reg0_start = pci_resource_start(pcidev, BAR_0);
- reg0_len = pci_resource_len(pcidev, BAR_0);
- reg0_flags = pci_resource_flags(pcidev, BAR_0);
-
- reg2_start = pci_resource_start(pcidev, BAR_2);
- reg2_len = pci_resource_len(pcidev, BAR_2);
- reg2_flags = pci_resource_flags(pcidev, BAR_2);
-
- reg4_start = pci_resource_start(pcidev, BAR_4);
- reg4_len = pci_resource_len(pcidev, BAR_4);
- reg4_flags = pci_resource_flags(pcidev, BAR_4);
-
- pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
- pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
- pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
-
- /* Make sure PCI base addr are MMIO */
- if (!(reg0_flags & IORESOURCE_MEM) ||
- !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
- ret = -ENODEV;
- goto bail1;
- }
-
- /* Check for weird/broken PCI region reporting */
- if ((reg0_len < C2_REG0_SIZE) ||
- (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
- printk(KERN_ERR PFX "Invalid PCI region sizes\n");
- ret = -ENODEV;
- goto bail1;
- }
-
- /* Reserve PCI I/O and memory resources */
- ret = pci_request_regions(pcidev, DRV_NAME);
- if (ret) {
- printk(KERN_ERR PFX "%s: Unable to request regions\n",
- pci_name(pcidev));
- goto bail1;
- }
-
- if ((sizeof(dma_addr_t) > 4)) {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
- if (ret < 0) {
- printk(KERN_ERR PFX "64b DMA configuration failed\n");
- goto bail2;
- }
- } else {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret < 0) {
- printk(KERN_ERR PFX "32b DMA configuration failed\n");
- goto bail2;
- }
- }
-
- /* Enables bus-mastering on the device */
- pci_set_master(pcidev);
-
- /* Remap the adapter PCI registers in BAR4 */
- mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
- sizeof(struct c2_adapter_pci_regs));
- if (!mmio_regs) {
- printk(KERN_ERR PFX
- "Unable to remap adapter PCI registers in BAR4\n");
- ret = -EIO;
- goto bail2;
- }
-
- /* Validate PCI regs magic */
- for (i = 0; i < sizeof(c2_magic); i++) {
- if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
- printk(KERN_ERR PFX "Downlevel Firmware boot loader "
- "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
- "utility to update your boot loader\n",
- i + 1, sizeof(c2_magic),
- readb(mmio_regs + C2_REGS_MAGIC + i),
- c2_magic[i]);
- printk(KERN_ERR PFX "Adapter not claimed\n");
- iounmap(mmio_regs);
- ret = -EIO;
- goto bail2;
- }
- }
-
- /* Validate the adapter version */
- if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
- printk(KERN_ERR PFX "Version mismatch "
- "[fw=%u, c2=%u], Adapter not claimed\n",
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
- C2_VERSION);
- ret = -EINVAL;
- iounmap(mmio_regs);
- goto bail2;
- }
-
- /* Validate the adapter IVN */
- if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
- printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
- "the OpenIB device support kit. "
- "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
- C2_IVN);
- ret = -EINVAL;
- iounmap(mmio_regs);
- goto bail2;
- }
-
- /* Allocate hardware structure */
- c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
- if (!c2dev) {
- printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
- pci_name(pcidev));
- ret = -ENOMEM;
- iounmap(mmio_regs);
- goto bail2;
- }
-
- memset(c2dev, 0, sizeof(*c2dev));
- spin_lock_init(&c2dev->lock);
- c2dev->pcidev = pcidev;
- c2dev->cur_tx = 0;
-
- /* Get the last RX index */
- c2dev->cur_rx =
- (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
- 0xffffc000) / sizeof(struct c2_rxp_desc);
-
- /* Request an interrupt line for the driver */
- ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
- if (ret) {
- printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
- pci_name(pcidev), pcidev->irq);
- iounmap(mmio_regs);
- goto bail3;
- }
-
- /* Set driver specific data */
- pci_set_drvdata(pcidev, c2dev);
-
- /* Initialize network device */
- if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
- ret = -ENOMEM;
- iounmap(mmio_regs);
- goto bail4;
- }
-
- /* Save off the actual size prior to unmapping mmio_regs */
- kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
-
- /* Unmap the adapter PCI registers in BAR4 */
- iounmap(mmio_regs);
-
- /* Register network device */
- ret = register_netdev(netdev);
- if (ret) {
- printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
- ret);
- goto bail5;
- }
-
- /* Disable network packets */
- netif_stop_queue(netdev);
-
- /* Remap the adapter HRXDQ PA space to kernel VA space */
- c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
- C2_RXP_HRXDQ_SIZE);
- if (!c2dev->mmio_rxp_ring) {
- printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
- ret = -EIO;
- goto bail6;
- }
-
- /* Remap the adapter HTXDQ PA space to kernel VA space */
- c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
- C2_TXP_HTXDQ_SIZE);
- if (!c2dev->mmio_txp_ring) {
- printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
- ret = -EIO;
- goto bail7;
- }
-
- /* Save off the current RX index in the last 4 bytes of the TXP Ring */
- C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
- /* Remap the PCI registers in adapter BAR0 to kernel VA space */
- c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
- if (!c2dev->regs) {
- printk(KERN_ERR PFX "Unable to remap BAR0\n");
- ret = -EIO;
- goto bail8;
- }
-
- /* Remap the PCI registers in adapter BAR4 to kernel VA space */
- c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
- c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
- kva_map_size);
- if (!c2dev->kva) {
- printk(KERN_ERR PFX "Unable to remap BAR4\n");
- ret = -EIO;
- goto bail9;
- }
-
- /* Print out the MAC address */
- pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr,
- netdev->irq);
-
- ret = c2_rnic_init(c2dev);
- if (ret) {
- printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
- goto bail10;
- }
-
- ret = c2_register_device(c2dev);
- if (ret)
- goto bail10;
-
- return 0;
-
- bail10:
- iounmap(c2dev->kva);
-
- bail9:
- iounmap(c2dev->regs);
-
- bail8:
- iounmap(c2dev->mmio_txp_ring);
-
- bail7:
- iounmap(c2dev->mmio_rxp_ring);
-
- bail6:
- unregister_netdev(netdev);
-
- bail5:
- free_netdev(netdev);
-
- bail4:
- free_irq(pcidev->irq, c2dev);
-
- bail3:
- ib_dealloc_device(&c2dev->ibdev);
-
- bail2:
- pci_release_regions(pcidev);
-
- bail1:
- pci_disable_device(pcidev);
-
- bail0:
- return ret;
-}
-
-static void c2_remove(struct pci_dev *pcidev)
-{
- struct c2_dev *c2dev = pci_get_drvdata(pcidev);
- struct net_device *netdev = c2dev->netdev;
-
- /* Unregister with OpenIB */
- c2_unregister_device(c2dev);
-
- /* Clean up the RNIC resources */
- c2_rnic_term(c2dev);
-
- /* Remove network device from the kernel */
- unregister_netdev(netdev);
-
- /* Free network device */
- free_netdev(netdev);
-
- /* Free the interrupt line */
- free_irq(pcidev->irq, c2dev);
-
- /* missing: Turn LEDs off here */
-
- /* Unmap adapter PA space */
- iounmap(c2dev->kva);
- iounmap(c2dev->regs);
- iounmap(c2dev->mmio_txp_ring);
- iounmap(c2dev->mmio_rxp_ring);
-
- /* Free the hardware structure */
- ib_dealloc_device(&c2dev->ibdev);
-
- /* Release reserved PCI I/O and memory resources */
- pci_release_regions(pcidev);
-
- /* Disable PCI device */
- pci_disable_device(pcidev);
-
- /* Clear driver specific data */
- pci_set_drvdata(pcidev, NULL);
-}
-
-static struct pci_driver c2_pci_driver = {
- .name = DRV_NAME,
- .id_table = c2_pci_table,
- .probe = c2_probe,
- .remove = c2_remove,
-};
-
-module_pci_driver(c2_pci_driver);
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
deleted file mode 100644
index 21b565a91fd6..000000000000
--- a/drivers/staging/rdma/amso1100/c2.h
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __C2_H
-#define __C2_H
-
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-
-#include "c2_provider.h"
-#include "c2_mq.h"
-#include "c2_status.h"
-
-#define DRV_NAME "c2"
-#define DRV_VERSION "1.1"
-#define PFX DRV_NAME ": "
-
-#define BAR_0 0
-#define BAR_2 2
-#define BAR_4 4
-
-#define RX_BUF_SIZE (1536 + 8)
-#define ETH_JUMBO_MTU 9000
-#define C2_MAGIC "CEPHEUS"
-#define C2_VERSION 4
-#define C2_IVN (18 & 0x7fffffff)
-
-#define C2_REG0_SIZE (16 * 1024)
-#define C2_REG2_SIZE (2 * 1024 * 1024)
-#define C2_REG4_SIZE (256 * 1024 * 1024)
-#define C2_NUM_TX_DESC 341
-#define C2_NUM_RX_DESC 256
-#define C2_PCI_REGS_OFFSET (0x10000)
-#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
-#define C2_RXP_HRXDQ_SIZE (4096)
-#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
-#define C2_TXP_HTXDQ_SIZE (4096)
-#define C2_TX_TIMEOUT (6*HZ)
-
-/* CEPHEUS */
-static const u8 c2_magic[] = {
- 0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
-};
-
-enum adapter_pci_regs {
- C2_REGS_MAGIC = 0x0000,
- C2_REGS_VERS = 0x0008,
- C2_REGS_IVN = 0x000C,
- C2_REGS_PCI_WINSIZE = 0x0010,
- C2_REGS_Q0_QSIZE = 0x0014,
- C2_REGS_Q0_MSGSIZE = 0x0018,
- C2_REGS_Q0_POOLSTART = 0x001C,
- C2_REGS_Q0_SHARED = 0x0020,
- C2_REGS_Q1_QSIZE = 0x0024,
- C2_REGS_Q1_MSGSIZE = 0x0028,
- C2_REGS_Q1_SHARED = 0x0030,
- C2_REGS_Q2_QSIZE = 0x0034,
- C2_REGS_Q2_MSGSIZE = 0x0038,
- C2_REGS_Q2_SHARED = 0x0040,
- C2_REGS_ENADDR = 0x004C,
- C2_REGS_RDMA_ENADDR = 0x0054,
- C2_REGS_HRX_CUR = 0x006C,
-};
-
-struct c2_adapter_pci_regs {
- char reg_magic[8];
- u32 version;
- u32 ivn;
- u32 pci_window_size;
- u32 q0_q_size;
- u32 q0_msg_size;
- u32 q0_pool_start;
- u32 q0_shared;
- u32 q1_q_size;
- u32 q1_msg_size;
- u32 q1_pool_start;
- u32 q1_shared;
- u32 q2_q_size;
- u32 q2_msg_size;
- u32 q2_pool_start;
- u32 q2_shared;
- u32 log_start;
- u32 log_size;
- u8 host_enaddr[8];
- u8 rdma_enaddr[8];
- u32 crash_entry;
- u32 crash_ready[2];
- u32 fw_txd_cur;
- u32 fw_hrxd_cur;
- u32 fw_rxd_cur;
-};
-
-enum pci_regs {
- C2_HISR = 0x0000,
- C2_DISR = 0x0004,
- C2_HIMR = 0x0008,
- C2_DIMR = 0x000C,
- C2_NISR0 = 0x0010,
- C2_NISR1 = 0x0014,
- C2_NIMR0 = 0x0018,
- C2_NIMR1 = 0x001C,
- C2_IDIS = 0x0020,
-};
-
-enum {
- C2_PCI_HRX_INT = 1 << 8,
- C2_PCI_HTX_INT = 1 << 17,
- C2_PCI_HRX_QUI = 1 << 31,
-};
-
-/*
- * Cepheus registers in BAR0.
- */
-struct c2_pci_regs {
- u32 hostisr;
- u32 dmaisr;
- u32 hostimr;
- u32 dmaimr;
- u32 netisr0;
- u32 netisr1;
- u32 netimr0;
- u32 netimr1;
- u32 int_disable;
-};
-
-/* TXP flags */
-enum c2_txp_flags {
- TXP_HTXD_DONE = 0,
- TXP_HTXD_READY = 1 << 0,
- TXP_HTXD_UNINIT = 1 << 1,
-};
-
-/* RXP flags */
-enum c2_rxp_flags {
- RXP_HRXD_UNINIT = 0,
- RXP_HRXD_READY = 1 << 0,
- RXP_HRXD_DONE = 1 << 1,
-};
-
-/* RXP status */
-enum c2_rxp_status {
- RXP_HRXD_ZERO = 0,
- RXP_HRXD_OK = 1 << 0,
- RXP_HRXD_BUF_OV = 1 << 1,
-};
-
-/* TXP descriptor fields */
-enum txp_desc {
- C2_TXP_FLAGS = 0x0000,
- C2_TXP_LEN = 0x0002,
- C2_TXP_ADDR = 0x0004,
-};
-
-/* RXP descriptor fields */
-enum rxp_desc {
- C2_RXP_FLAGS = 0x0000,
- C2_RXP_STATUS = 0x0002,
- C2_RXP_COUNT = 0x0004,
- C2_RXP_LEN = 0x0006,
- C2_RXP_ADDR = 0x0008,
-};
-
-struct c2_txp_desc {
- u16 flags;
- u16 len;
- u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_desc {
- u16 flags;
- u16 status;
- u16 count;
- u16 len;
- u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_hdr {
- u16 flags;
- u16 status;
- u16 len;
- u16 rsvd;
-} __attribute__ ((packed));
-
-struct c2_tx_desc {
- u32 len;
- u32 status;
- dma_addr_t next_offset;
-};
-
-struct c2_rx_desc {
- u32 len;
- u32 status;
- dma_addr_t next_offset;
-};
-
-struct c2_alloc {
- u32 last;
- u32 max;
- spinlock_t lock;
- unsigned long *table;
-};
-
-struct c2_array {
- struct {
- void **page;
- int used;
- } *page_list;
-};
-
-/*
- * The MQ shared pointer pool is organized as a linked list of
- * chunks. Each chunk contains a linked list of free shared pointers
- * that can be allocated to a given user mode client.
- *
- */
-struct sp_chunk {
- struct sp_chunk *next;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- u16 head;
- u16 shared_ptr[0];
-};
-
-struct c2_pd_table {
- u32 last;
- u32 max;
- spinlock_t lock;
- unsigned long *table;
-};
-
-struct c2_qp_table {
- struct idr idr;
- spinlock_t lock;
-};
-
-struct c2_element {
- struct c2_element *next;
- void *ht_desc; /* host descriptor */
- void __iomem *hw_desc; /* hardware descriptor */
- struct sk_buff *skb;
- dma_addr_t mapaddr;
- u32 maplen;
-};
-
-struct c2_ring {
- struct c2_element *to_clean;
- struct c2_element *to_use;
- struct c2_element *start;
- unsigned long count;
-};
-
-struct c2_dev {
- struct ib_device ibdev;
- void __iomem *regs;
- void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
- void __iomem *mmio_rxp_ring;
- spinlock_t lock;
- struct pci_dev *pcidev;
- struct net_device *netdev;
- struct net_device *pseudo_netdev;
- unsigned int cur_tx;
- unsigned int cur_rx;
- u32 adapter_handle;
- int device_cap_flags;
- void __iomem *kva; /* KVA device memory */
- unsigned long pa; /* PA device memory */
- void **qptr_array;
-
- struct kmem_cache *host_msg_cache;
-
- struct list_head cca_link; /* adapter list */
- struct list_head eh_wakeup_list; /* event wakeup list */
- wait_queue_head_t req_vq_wo;
-
- /* Cached RNIC properties */
- struct ib_device_attr props;
-
- struct c2_pd_table pd_table;
- struct c2_qp_table qp_table;
- int ports; /* num of GigE ports */
- int devnum;
- spinlock_t vqlock; /* sync vbs req MQ */
-
- /* Verbs Queues */
- struct c2_mq req_vq; /* Verbs Request MQ */
- struct c2_mq rep_vq; /* Verbs Reply MQ */
- struct c2_mq aeq; /* Async Events MQ */
-
- /* Kernel client MQs */
- struct sp_chunk *kern_mqsp_pool;
-
- /* Device updates these values when posting messages to a host
- * target queue */
- u16 req_vq_shared;
- u16 rep_vq_shared;
- u16 aeq_shared;
- u16 irq_claimed;
-
- /*
- * Shared host target pages for user-accessible MQs.
- */
- int hthead; /* index of first free entry */
- void *htpages; /* kernel vaddr */
- int htlen; /* length of htpages memory */
- void *htuva; /* user mapped vaddr */
- spinlock_t htlock; /* serialize allocation */
-
- u64 adapter_hint_uva; /* access to the activity FIFO */
-
- // spinlock_t aeq_lock;
- // spinlock_t rnic_lock;
-
- __be16 *hint_count;
- dma_addr_t hint_count_dma;
- u16 hints_read;
-
- int init; /* TRUE if it's ready */
- char ae_cache_name[16];
- char vq_cache_name[16];
-};
-
-struct c2_port {
- u32 msg_enable;
- struct c2_dev *c2dev;
- struct net_device *netdev;
-
- spinlock_t tx_lock;
- u32 tx_avail;
- struct c2_ring tx_ring;
- struct c2_ring rx_ring;
-
- void *mem; /* PCI memory for host rings */
- dma_addr_t dma;
- unsigned long mem_size;
-
- u32 rx_buf_size;
-};
-
-/*
- * Activity FIFO registers in BAR0.
- */
-#define PCI_BAR0_HOST_HINT 0x100
-#define PCI_BAR0_ADAPTER_HINT 0x2000
-
-/*
- * Ammasso PCI vendor id and Cepheus PCI device id.
- */
-#define CQ_ARMED 0x01
-#define CQ_WAIT_FOR_DMA 0x80
-
-/*
- * The format of a hint is as follows:
- * Lower 16 bits are the count of hints for the queue.
- * Next 15 bits are the qp_index
- * Upper most bit depends on who reads it:
- * If read by producer, then it means Full (1) or Not-Full (0)
- * If read by consumer, then it means Empty (1) or Not-Empty (0)
- */
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-
-/*
- * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
- * struct.
- */
-#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
-
-#ifndef readq
-static inline u64 readq(const void __iomem * addr)
-{
- u64 ret = readl(addr + 4);
- ret <<= 32;
- ret |= readl(addr);
-
- return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void __raw_writeq(u64 val, void __iomem * addr)
-{
- __raw_writel((u32) (val), addr);
- __raw_writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
-#define C2_SET_CUR_RX(c2dev, cur_rx) \
- __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
-
-#define C2_GET_CUR_RX(c2dev) \
- be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
-
-static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct c2_dev, ibdev);
-}
-
-static inline int c2_errno(void *reply)
-{
- switch (c2_wr_get_result(reply)) {
- case C2_OK:
- return 0;
- case CCERR_NO_BUFS:
- case CCERR_INSUFFICIENT_RESOURCES:
- case CCERR_ZERO_RDMA_READ_RESOURCES:
- return -ENOMEM;
- case CCERR_MR_IN_USE:
- case CCERR_QP_IN_USE:
- return -EBUSY;
- case CCERR_ADDR_IN_USE:
- return -EADDRINUSE;
- case CCERR_ADDR_NOT_AVAIL:
- return -EADDRNOTAVAIL;
- case CCERR_CONN_RESET:
- return -ECONNRESET;
- case CCERR_NOT_IMPLEMENTED:
- case CCERR_INVALID_WQE:
- return -ENOSYS;
- case CCERR_QP_NOT_PRIVILEGED:
- return -EPERM;
- case CCERR_STACK_ERROR:
- return -EPROTO;
- case CCERR_ACCESS_VIOLATION:
- case CCERR_BASE_AND_BOUNDS_VIOLATION:
- return -EFAULT;
- case CCERR_STAG_STATE_NOT_INVALID:
- case CCERR_INVALID_ADDRESS:
- case CCERR_INVALID_CQ:
- case CCERR_INVALID_EP:
- case CCERR_INVALID_MODIFIER:
- case CCERR_INVALID_MTU:
- case CCERR_INVALID_PD_ID:
- case CCERR_INVALID_QP:
- case CCERR_INVALID_RNIC:
- case CCERR_INVALID_STAG:
- return -EINVAL;
- default:
- return -EAGAIN;
- }
-}
-
-/* Device */
-int c2_register_device(struct c2_dev *c2dev);
-void c2_unregister_device(struct c2_dev *c2dev);
-int c2_rnic_init(struct c2_dev *c2dev);
-void c2_rnic_term(struct c2_dev *c2dev);
-void c2_rnic_interrupt(struct c2_dev *c2dev);
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-
-/* QPs */
-int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
- struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
- struct ib_qp_attr *attr, int attr_mask);
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
- int ord, int ird);
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr);
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr);
-void c2_init_qp_table(struct c2_dev *c2dev);
-void c2_cleanup_qp_table(struct c2_dev *c2dev);
-void c2_set_qp_state(struct c2_qp *, int);
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
-
-/* PDs */
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-int c2_init_pd_table(struct c2_dev *c2dev);
-void c2_cleanup_pd_table(struct c2_dev *c2dev);
-
-/* CQs */
-int c2_init_cq(struct c2_dev *c2dev, int entries,
- struct c2_ucontext *ctx, struct c2_cq *cq);
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-
-/* CM */
-int c2_llp_connect(struct iw_cm_id *cm_id,
- struct iw_cm_conn_param *iw_param);
-int c2_llp_accept(struct iw_cm_id *cm_id,
- struct iw_cm_conn_param *iw_param);
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
- u8 pdata_len);
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
-int c2_llp_service_destroy(struct iw_cm_id *cm_id);
-
-/* MM */
-int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
- int page_size, int pbl_depth, u32 length,
- u32 off, u64 *va, enum c2_acf acf,
- struct c2_mr *mr);
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
-
-/* AE */
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
-
-/* MQSP Allocator */
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
- struct sp_chunk **root);
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
- dma_addr_t *dma_addr, gfp_t gfp_mask);
-void c2_free_mqsp(__be16* mqsp);
-#endif
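
For reference, a minimal standalone sketch of the activity-hint word layout defined by the C2_HINT_* macros in c2.h above (low 16 bits: hint count; bits 16-30: queue index; top bit: full/empty flag). The DEMO_* names and values are hypothetical and the program is not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Same layout as the C2_HINT_* macros in c2.h above. */
#define DEMO_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define DEMO_HINT_GET_INDEX(hint)           (((hint) & 0x7FFF0000) >> 16)
#define DEMO_HINT_GET_COUNT(hint)           ((hint) & 0x0000FFFF)

int main(void)
{
	uint32_t hint = DEMO_HINT_MAKE(5u, 3u);	/* hypothetical queue 5, count 3 */

	printf("index=%u count=%u\n",
	       (unsigned)DEMO_HINT_GET_INDEX(hint),
	       (unsigned)DEMO_HINT_GET_COUNT(hint));
	return 0;
}
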
diff --git a/drivers/staging/rdma/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
deleted file mode 100644
index eb7a92b2692f..000000000000
--- a/drivers/staging/rdma/amso1100/c2_ae.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_status.h"
-#include "c2_ae.h"
-
-static int c2_convert_cm_status(u32 c2_status)
-{
- switch (c2_status) {
- case C2_CONN_STATUS_SUCCESS:
- return 0;
- case C2_CONN_STATUS_REJECTED:
- return -ENETRESET;
- case C2_CONN_STATUS_REFUSED:
- return -ECONNREFUSED;
- case C2_CONN_STATUS_TIMEDOUT:
- return -ETIMEDOUT;
- case C2_CONN_STATUS_NETUNREACH:
- return -ENETUNREACH;
- case C2_CONN_STATUS_HOSTUNREACH:
- return -EHOSTUNREACH;
- case C2_CONN_STATUS_INVALID_RNIC:
- return -EINVAL;
- case C2_CONN_STATUS_INVALID_QP:
- return -EINVAL;
- case C2_CONN_STATUS_INVALID_QP_STATE:
- return -EINVAL;
- case C2_CONN_STATUS_ADDR_NOT_AVAIL:
- return -EADDRNOTAVAIL;
- default:
- printk(KERN_ERR PFX
- "%s - Unable to convert CM status: %d\n",
- __func__, c2_status);
- return -EIO;
- }
-}
-
-static const char* to_event_str(int event)
-{
- static const char* event_str[] = {
- "CCAE_REMOTE_SHUTDOWN",
- "CCAE_ACTIVE_CONNECT_RESULTS",
- "CCAE_CONNECTION_REQUEST",
- "CCAE_LLP_CLOSE_COMPLETE",
- "CCAE_TERMINATE_MESSAGE_RECEIVED",
- "CCAE_LLP_CONNECTION_RESET",
- "CCAE_LLP_CONNECTION_LOST",
- "CCAE_LLP_SEGMENT_SIZE_INVALID",
- "CCAE_LLP_INVALID_CRC",
- "CCAE_LLP_BAD_FPDU",
- "CCAE_INVALID_DDP_VERSION",
- "CCAE_INVALID_RDMA_VERSION",
- "CCAE_UNEXPECTED_OPCODE",
- "CCAE_INVALID_DDP_QUEUE_NUMBER",
- "CCAE_RDMA_READ_NOT_ENABLED",
- "CCAE_RDMA_WRITE_NOT_ENABLED",
- "CCAE_RDMA_READ_TOO_SMALL",
- "CCAE_NO_L_BIT",
- "CCAE_TAGGED_INVALID_STAG",
- "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
- "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
- "CCAE_TAGGED_INVALID_PD",
- "CCAE_WRAP_ERROR",
- "CCAE_BAD_CLOSE",
- "CCAE_BAD_LLP_CLOSE",
- "CCAE_INVALID_MSN_RANGE",
- "CCAE_INVALID_MSN_GAP",
- "CCAE_IRRQ_OVERFLOW",
- "CCAE_IRRQ_MSN_GAP",
- "CCAE_IRRQ_MSN_RANGE",
- "CCAE_IRRQ_INVALID_STAG",
- "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
- "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
- "CCAE_IRRQ_INVALID_PD",
- "CCAE_IRRQ_WRAP_ERROR",
- "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
- "CCAE_CQ_RQ_COMPLETION_ERROR",
- "CCAE_QP_SRQ_WQE_ERROR",
- "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
- "CCAE_CQ_OVERFLOW",
- "CCAE_CQ_OPERATION_ERROR",
- "CCAE_SRQ_LIMIT_REACHED",
- "CCAE_QP_RQ_LIMIT_REACHED",
- "CCAE_SRQ_CATASTROPHIC_ERROR",
- "CCAE_RNIC_CATASTROPHIC_ERROR"
- };
-
- if (event < CCAE_REMOTE_SHUTDOWN ||
- event > CCAE_RNIC_CATASTROPHIC_ERROR)
- return "<invalid event>";
-
- event -= CCAE_REMOTE_SHUTDOWN;
- return event_str[event];
-}
-
-static const char *to_qp_state_str(int state)
-{
- switch (state) {
- case C2_QP_STATE_IDLE:
- return "C2_QP_STATE_IDLE";
- case C2_QP_STATE_CONNECTING:
- return "C2_QP_STATE_CONNECTING";
- case C2_QP_STATE_RTS:
- return "C2_QP_STATE_RTS";
- case C2_QP_STATE_CLOSING:
- return "C2_QP_STATE_CLOSING";
- case C2_QP_STATE_TERMINATE:
- return "C2_QP_STATE_TERMINATE";
- case C2_QP_STATE_ERROR:
- return "C2_QP_STATE_ERROR";
- default:
- return "<invalid QP state>";
- }
-}
-
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
-{
- struct c2_mq *mq = c2dev->qptr_array[mq_index];
- union c2wr *wr;
- void *resource_user_context;
- struct iw_cm_event cm_event;
- struct ib_event ib_event;
- enum c2_resource_indicator resource_indicator;
- enum c2_event_id event_id;
- unsigned long flags;
- int status;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
-
- /*
- * retrieve the message
- */
- wr = c2_mq_consume(mq);
- if (!wr)
- return;
-
- memset(&ib_event, 0, sizeof(ib_event));
- memset(&cm_event, 0, sizeof(cm_event));
-
- event_id = c2_wr_get_id(wr);
- resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
- resource_user_context =
- (void *) (unsigned long) wr->ae.ae_generic.user_context;
-
- status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
-
- pr_debug("event received c2_dev=%p, event_id=%d, "
- "resource_indicator=%d, user_context=%p, status = %d\n",
- c2dev, event_id, resource_indicator, resource_user_context,
- status);
-
- switch (resource_indicator) {
- case C2_RES_IND_QP:{
-
- struct c2_qp *qp = resource_user_context;
- struct iw_cm_id *cm_id = qp->cm_id;
- struct c2wr_ae_active_connect_results *res;
-
- if (!cm_id) {
- pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
- qp);
- goto ignore_it;
- }
- pr_debug("%s: event = %s, user_context=%llx, "
- "resource_type=%x, "
- "resource=%x, qp_state=%s\n",
- __func__,
- to_event_str(event_id),
- (unsigned long long) wr->ae.ae_generic.user_context,
- be32_to_cpu(wr->ae.ae_generic.resource_type),
- be32_to_cpu(wr->ae.ae_generic.resource),
- to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
-
- c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
-
- switch (event_id) {
- case CCAE_ACTIVE_CONNECT_RESULTS:
- res = &wr->ae.ae_active_connect_results;
- cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- laddr->sin_addr.s_addr = res->laddr;
- raddr->sin_addr.s_addr = res->raddr;
- laddr->sin_port = res->lport;
- raddr->sin_port = res->rport;
- if (status == 0) {
- cm_event.private_data_len =
- be32_to_cpu(res->private_data_length);
- cm_event.private_data = res->private_data;
- } else {
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
- cm_event.private_data_len = 0;
- cm_event.private_data = NULL;
- }
- if (cm_id->event_handler)
- cm_id->event_handler(cm_id, &cm_event);
- break;
- case CCAE_TERMINATE_MESSAGE_RECEIVED:
- case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
- ib_event.device = &c2dev->ibdev;
- ib_event.element.qp = &qp->ibqp;
- ib_event.event = IB_EVENT_QP_REQ_ERR;
-
- if (qp->ibqp.event_handler)
- qp->ibqp.event_handler(&ib_event,
- qp->ibqp.
- qp_context);
- break;
- case CCAE_BAD_CLOSE:
- case CCAE_LLP_CLOSE_COMPLETE:
- case CCAE_LLP_CONNECTION_RESET:
- case CCAE_LLP_CONNECTION_LOST:
- BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
-
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
- cm_event.event = IW_CM_EVENT_CLOSE;
- cm_event.status = 0;
- if (cm_id->event_handler)
- cm_id->event_handler(cm_id, &cm_event);
- break;
- default:
- BUG_ON(1);
- pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
- "CM_ID=%p\n",
- __func__, __LINE__,
- event_id, qp, cm_id);
- break;
- }
- break;
- }
-
- case C2_RES_IND_EP:{
-
- struct c2wr_ae_connection_request *req =
- &wr->ae.ae_connection_request;
- struct iw_cm_id *cm_id =
- resource_user_context;
-
- pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
- if (event_id != CCAE_CONNECTION_REQUEST) {
- pr_debug("%s: Invalid event_id: %d\n",
- __func__, event_id);
- break;
- }
- cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
- cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
- laddr->sin_addr.s_addr = req->laddr;
- raddr->sin_addr.s_addr = req->raddr;
- laddr->sin_port = req->lport;
- raddr->sin_port = req->rport;
- cm_event.private_data_len =
- be32_to_cpu(req->private_data_length);
- cm_event.private_data = req->private_data;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send
- * max supported values
- */
- cm_event.ird = cm_event.ord = 128;
-
- if (cm_id->event_handler)
- cm_id->event_handler(cm_id, &cm_event);
- break;
- }
-
- case C2_RES_IND_CQ:{
- struct c2_cq *cq =
- resource_user_context;
-
- pr_debug("IB_EVENT_CQ_ERR\n");
- ib_event.device = &c2dev->ibdev;
- ib_event.element.cq = &cq->ibcq;
- ib_event.event = IB_EVENT_CQ_ERR;
-
- if (cq->ibcq.event_handler)
- cq->ibcq.event_handler(&ib_event,
- cq->ibcq.cq_context);
- break;
- }
-
- default:
- printk("Bad resource indicator = %d\n",
- resource_indicator);
- break;
- }
-
- ignore_it:
- c2_mq_free(mq);
-}
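
to_event_str() above guards the table lookup with an explicit range check before subtracting the enum base and indexing. A shortened, hypothetical sketch of that pattern (the real table covers every CCAE_* id; the EV_* names here are stand-ins):

#include <stdio.h>

enum { EV_BASE = 0x80, EV_SHUTDOWN = EV_BASE, EV_CONNECT, EV_CLOSE, EV_LAST = EV_CLOSE };

static const char *ev_str(int event)
{
	static const char *tbl[] = { "SHUTDOWN", "CONNECT", "CLOSE" };

	/* Range-check before indexing, as to_event_str() does above. */
	if (event < EV_BASE || event > EV_LAST)
		return "<invalid event>";
	return tbl[event - EV_BASE];
}

int main(void)
{
	printf("%s %s\n", ev_str(0x81), ev_str(0x10));
	return 0;
}
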
diff --git a/drivers/staging/rdma/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
deleted file mode 100644
index 3a065c33b83b..000000000000
--- a/drivers/staging/rdma/amso1100/c2_ae.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_AE_H_
-#define _C2_AE_H_
-
-/*
- * WARNING: If you change this file, also bump C2_IVN_BASE
- * in common/include/clustercore/c2_ivn.h.
- */
-
-/*
- * Asynchronous Event Identifiers
- *
- * These start at 0x80 only so it's obvious from inspection that
- * they are not work-request statuses. This isn't critical.
- *
- * NOTE: these event id's must fit in eight bits.
- */
-enum c2_event_id {
- CCAE_REMOTE_SHUTDOWN = 0x80,
- CCAE_ACTIVE_CONNECT_RESULTS,
- CCAE_CONNECTION_REQUEST,
- CCAE_LLP_CLOSE_COMPLETE,
- CCAE_TERMINATE_MESSAGE_RECEIVED,
- CCAE_LLP_CONNECTION_RESET,
- CCAE_LLP_CONNECTION_LOST,
- CCAE_LLP_SEGMENT_SIZE_INVALID,
- CCAE_LLP_INVALID_CRC,
- CCAE_LLP_BAD_FPDU,
- CCAE_INVALID_DDP_VERSION,
- CCAE_INVALID_RDMA_VERSION,
- CCAE_UNEXPECTED_OPCODE,
- CCAE_INVALID_DDP_QUEUE_NUMBER,
- CCAE_RDMA_READ_NOT_ENABLED,
- CCAE_RDMA_WRITE_NOT_ENABLED,
- CCAE_RDMA_READ_TOO_SMALL,
- CCAE_NO_L_BIT,
- CCAE_TAGGED_INVALID_STAG,
- CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
- CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
- CCAE_TAGGED_INVALID_PD,
- CCAE_WRAP_ERROR,
- CCAE_BAD_CLOSE,
- CCAE_BAD_LLP_CLOSE,
- CCAE_INVALID_MSN_RANGE,
- CCAE_INVALID_MSN_GAP,
- CCAE_IRRQ_OVERFLOW,
- CCAE_IRRQ_MSN_GAP,
- CCAE_IRRQ_MSN_RANGE,
- CCAE_IRRQ_INVALID_STAG,
- CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
- CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
- CCAE_IRRQ_INVALID_PD,
- CCAE_IRRQ_WRAP_ERROR,
- CCAE_CQ_SQ_COMPLETION_OVERFLOW,
- CCAE_CQ_RQ_COMPLETION_ERROR,
- CCAE_QP_SRQ_WQE_ERROR,
- CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
- CCAE_CQ_OVERFLOW,
- CCAE_CQ_OPERATION_ERROR,
- CCAE_SRQ_LIMIT_REACHED,
- CCAE_QP_RQ_LIMIT_REACHED,
- CCAE_SRQ_CATASTROPHIC_ERROR,
- CCAE_RNIC_CATASTROPHIC_ERROR
-/* WARNING If you add more id's, make sure their values fit in eight bits. */
-};
-
-/*
- * Resource Indicators and Identifiers
- */
-enum c2_resource_indicator {
- C2_RES_IND_QP = 1,
- C2_RES_IND_EP,
- C2_RES_IND_CQ,
- C2_RES_IND_SRQ,
-};
-
-#endif /* _C2_AE_H_ */
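
The header notes that the event ids must stay within eight bits. As a hedged aside, not something the driver itself does, a C11 static assertion can express that comment as a build-time check; the enum below is a stand-in with a hypothetical last value, not the real list.

enum demo_event_id {
	DEMO_CCAE_FIRST = 0x80,
	/* ... intermediate ids elided ... */
	DEMO_CCAE_LAST = 0xAC,	/* hypothetical last id */
};

_Static_assert(DEMO_CCAE_LAST <= 0xFF, "event ids must fit in eight bits");

int main(void) { return 0; }
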
diff --git a/drivers/staging/rdma/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
deleted file mode 100644
index 039872dfabbc..000000000000
--- a/drivers/staging/rdma/amso1100/c2_alloc.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/bitmap.h>
-
-#include "c2.h"
-
-static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
- struct sp_chunk **head)
-{
- int i;
- struct sp_chunk *new_head;
- dma_addr_t dma_addr;
-
- new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
- &dma_addr, gfp_mask);
- if (new_head == NULL)
- return -ENOMEM;
-
- new_head->dma_addr = dma_addr;
- dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
-
- new_head->next = NULL;
- new_head->head = 0;
-
- /* build list where each index is the next free slot */
- for (i = 0;
- i < (PAGE_SIZE - sizeof(struct sp_chunk) -
- sizeof(u16)) / sizeof(u16) - 1;
- i++) {
- new_head->shared_ptr[i] = i + 1;
- }
- /* terminate list */
- new_head->shared_ptr[i] = 0xFFFF;
-
- *head = new_head;
- return 0;
-}
-
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
- struct sp_chunk **root)
-{
- return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
-}
-
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
-{
- struct sp_chunk *next;
-
- while (root) {
- next = root->next;
- dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
- dma_unmap_addr(root, mapping));
- root = next;
- }
-}
-
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
- dma_addr_t *dma_addr, gfp_t gfp_mask)
-{
- u16 mqsp;
-
- while (head) {
- mqsp = head->head;
- if (mqsp != 0xFFFF) {
- head->head = head->shared_ptr[mqsp];
- break;
- } else if (head->next == NULL) {
- if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
- 0) {
- head = head->next;
- mqsp = head->head;
- head->head = head->shared_ptr[mqsp];
- break;
- } else
- return NULL;
- } else
- head = head->next;
- }
- if (head) {
- *dma_addr = head->dma_addr +
- ((unsigned long) &(head->shared_ptr[mqsp]) -
- (unsigned long) head);
- pr_debug("%s addr %p dma_addr %llx\n", __func__,
- &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
- return (__force __be16 *) &(head->shared_ptr[mqsp]);
- }
- return NULL;
-}
-
-void c2_free_mqsp(__be16 *mqsp)
-{
- struct sp_chunk *head;
- u16 idx;
-
- /* The chunk containing this ptr begins at the page boundary */
- head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
-
- /* Link head to new mqsp */
- *mqsp = (__force __be16) head->head;
-
- /* Compute the shared_ptr index */
- idx = (offset_in_page(mqsp)) >> 1;
- idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
-
- /* Point this index at the head */
- head->shared_ptr[idx] = head->head;
-
- /* Point head at this index */
- head->head = idx;
-}
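
The shared-pointer pool above is an intrusive free list: each u16 slot stores the index of the next free slot, head points at the first free index, and 0xFFFF terminates the list. A minimal userspace sketch of the same idea, without the per-chunk DMA handling (names and sizes are stand-ins, not driver API):

#include <stdio.h>
#include <stdint.h>

#define NSLOTS 8
#define END    0xFFFF

static uint16_t slot[NSLOTS];
static uint16_t head;

static void pool_init(void)
{
	int i;

	for (i = 0; i < NSLOTS - 1; i++)
		slot[i] = i + 1;	/* each slot names the next free slot */
	slot[NSLOTS - 1] = END;
	head = 0;
}

static int pool_alloc(void)
{
	uint16_t idx = head;

	if (idx == END)
		return -1;		/* pool exhausted */
	head = slot[idx];		/* unlink the first free slot */
	return idx;
}

static void pool_free(uint16_t idx)
{
	slot[idx] = head;		/* link the old head behind idx */
	head = idx;
}

int main(void)
{
	pool_init();
	int a = pool_alloc(), b = pool_alloc();

	printf("alloc %d %d\n", a, b);
	pool_free(a);
	printf("alloc again %d\n", pool_alloc());
	return 0;
}
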
diff --git a/drivers/staging/rdma/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
deleted file mode 100644
index f8dbdb9e0f66..000000000000
--- a/drivers/staging/rdma/amso1100/c2_cm.c
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_vq.h"
-#include <rdma/iw_cm.h>
-
-int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- struct c2_dev *c2dev = to_c2dev(cm_id->device);
- struct ib_qp *ibqp;
- struct c2_qp *qp;
- struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
- struct c2_vq_req *vq_req;
- int err;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
- if (cm_id->remote_addr.ss_family != AF_INET)
- return -ENOSYS;
-
- ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
- if (!ibqp)
- return -EINVAL;
- qp = to_c2qp(ibqp);
-
- /* Associate QP <--> CM_ID */
- cm_id->provider_data = qp;
- cm_id->add_ref(cm_id);
- qp->cm_id = cm_id;
-
- /*
- * only support the max private_data length
- */
- if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
- err = -EINVAL;
- goto bail0;
- }
- /*
- * Set the rdma read limits
- */
- err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
- if (err)
- goto bail0;
-
- /*
- * Create and send a WR_QP_CONNECT...
- */
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- err = -ENOMEM;
- goto bail1;
- }
-
- c2_wr_set_id(wr, CCWR_QP_CONNECT);
- wr->hdr.context = 0;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->qp_handle = qp->adapter_handle;
-
- wr->remote_addr = raddr->sin_addr.s_addr;
- wr->remote_port = raddr->sin_port;
-
- /*
- * Move any private data from the caller's buf into
- * the WR.
- */
- if (iw_param->private_data) {
- wr->private_data_length =
- cpu_to_be32(iw_param->private_data_len);
- memcpy(&wr->private_data[0], iw_param->private_data,
- iw_param->private_data_len);
- } else
- wr->private_data_length = 0;
-
- /*
- * Send WR to adapter. NOTE: There is no synch reply from
- * the adapter.
- */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- vq_req_free(c2dev, vq_req);
-
- bail1:
- kfree(wr);
- bail0:
- if (err) {
- /*
- * If we fail, release reference on QP and
- * disassociate QP from CM_ID
- */
- cm_id->provider_data = NULL;
- qp->cm_id = NULL;
- cm_id->rem_ref(cm_id);
- }
- return err;
-}
-
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
-{
- struct c2_dev *c2dev;
- struct c2wr_ep_listen_create_req wr;
- struct c2wr_ep_listen_create_rep *reply;
- struct c2_vq_req *vq_req;
- int err;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-
- if (cm_id->local_addr.ss_family != AF_INET)
- return -ENOSYS;
-
- c2dev = to_c2dev(cm_id->device);
- if (c2dev == NULL)
- return -EINVAL;
-
- /*
- * Allocate verbs request.
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
- wr.hdr.context = (u64) (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.local_addr = laddr->sin_addr.s_addr;
- wr.local_port = laddr->sin_port;
- wr.backlog = cpu_to_be32(backlog);
- wr.user_context = (u64) (unsigned long) cm_id;
-
- /*
- * Reference the request struct. Dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- /*
- * Process reply
- */
- reply =
- (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- if ((err = c2_errno(reply)) != 0)
- goto bail1;
-
- /*
- * Keep the adapter handle. Used in subsequent destroy
- */
- cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
-
- /*
- * free vq stuff
- */
- vq_repbuf_free(c2dev, reply);
- vq_req_free(c2dev, vq_req);
-
- return 0;
-
- bail1:
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-
-int c2_llp_service_destroy(struct iw_cm_id *cm_id)
-{
-
- struct c2_dev *c2dev;
- struct c2wr_ep_listen_destroy_req wr;
- struct c2wr_ep_listen_destroy_rep *reply;
- struct c2_vq_req *vq_req;
- int err;
-
- c2dev = to_c2dev(cm_id->device);
- if (c2dev == NULL)
- return -EINVAL;
-
- /*
- * Allocate verbs request.
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- /*
- * Process reply
- */
- reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- vq_repbuf_free(c2dev, reply);
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- struct c2_dev *c2dev = to_c2dev(cm_id->device);
- struct c2_qp *qp;
- struct ib_qp *ibqp;
- struct c2wr_cr_accept_req *wr; /* variable length WR */
- struct c2_vq_req *vq_req;
- struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
- int err;
-
- ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
- if (!ibqp)
- return -EINVAL;
- qp = to_c2qp(ibqp);
-
- /* Set the RDMA read limits */
- err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
- if (err)
- goto bail0;
-
- /* Allocate verbs request. */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- err = -ENOMEM;
- goto bail0;
- }
- vq_req->qp = qp;
- vq_req->cm_id = cm_id;
- vq_req->event = IW_CM_EVENT_ESTABLISHED;
-
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail1;
- }
-
- /* Build the WR */
- c2_wr_set_id(wr, CCWR_CR_ACCEPT);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
- wr->qp_handle = qp->adapter_handle;
-
- /* Replace the cr_handle with the QP after accept */
- cm_id->provider_data = qp;
- cm_id->add_ref(cm_id);
- qp->cm_id = cm_id;
-
- cm_id->provider_data = qp;
-
- /* Validate private_data length */
- if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
- err = -EINVAL;
- goto bail1;
- }
-
- if (iw_param->private_data) {
- wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
- memcpy(&wr->private_data[0],
- iw_param->private_data, iw_param->private_data_len);
- } else
- wr->private_data_length = 0;
-
- /* Reference the request struct. Dereferenced in the int handler. */
- vq_req_get(c2dev, vq_req);
-
- /* Send WR to adapter */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- /* Wait for reply from adapter */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- /* Check that reply is present */
- reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-
- if (!err)
- c2_set_qp_state(qp, C2_QP_STATE_RTS);
- bail1:
- kfree(wr);
- vq_req_free(c2dev, vq_req);
- bail0:
- if (err) {
- /*
- * If we fail, release reference on QP and
- * disassociate QP from CM_ID
- */
- cm_id->provider_data = NULL;
- qp->cm_id = NULL;
- cm_id->rem_ref(cm_id);
- }
- return err;
-}
-
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct c2_dev *c2dev;
- struct c2wr_cr_reject_req wr;
- struct c2_vq_req *vq_req;
- struct c2wr_cr_reject_rep *reply;
- int err;
-
- c2dev = to_c2dev(cm_id->device);
-
- /*
- * Allocate verbs request.
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_CR_REJECT);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- /*
- * Process reply
- */
- reply = (struct c2wr_cr_reject_rep *) (unsigned long)
- vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
- err = c2_errno(reply);
- /*
- * free vq stuff
- */
- vq_repbuf_free(c2dev, reply);
-
- bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
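
Every CM call above follows the same verbs-queue sequence: allocate a request, take a reference that the interrupt handler will drop, post the work request, wait for the reply, and drop the reference yourself only when the post fails. A hedged sketch of that control flow with stand-in stubs (the demo_* names are not driver API):

#include <stdio.h>
#include <errno.h>

struct demo_req { int refcnt; long reply; };

static struct demo_req *demo_req_alloc(void) { static struct demo_req r; r.refcnt = 0; r.reply = 0; return &r; }
static void demo_req_get(struct demo_req *r)    { r->refcnt++; }
static void demo_req_put(struct demo_req *r)    { r->refcnt--; }
static int  demo_send_wr(struct demo_req *r)    { r->reply = 42; return 0; }	/* pretend the adapter answered */
static int  demo_wait_reply(struct demo_req *r) { return r->reply ? 0 : -ETIMEDOUT; }

int main(void)
{
	struct demo_req *req = demo_req_alloc();
	int err;

	demo_req_get(req);		/* reference dropped by the reply handler */
	err = demo_send_wr(req);
	if (err) {
		demo_req_put(req);	/* send failed: drop the reference ourselves */
		return 1;
	}
	err = demo_wait_reply(req);	/* block until the reply message arrives */
	printf("reply status %d\n", err);
	return err ? 1 : 0;
}
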
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
deleted file mode 100644
index 7ad0c082485a..000000000000
--- a/drivers/staging/rdma/amso1100/c2_cq.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
-
-static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
-{
- struct c2_cq *cq;
- unsigned long flags;
-
- spin_lock_irqsave(&c2dev->lock, flags);
- cq = c2dev->qptr_array[cqn];
- if (!cq) {
- spin_unlock_irqrestore(&c2dev->lock, flags);
- return NULL;
- }
- atomic_inc(&cq->refcount);
- spin_unlock_irqrestore(&c2dev->lock, flags);
- return cq;
-}
-
-static void c2_cq_put(struct c2_cq *cq)
-{
- if (atomic_dec_and_test(&cq->refcount))
- wake_up(&cq->wait);
-}
-
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
-{
- struct c2_cq *cq;
-
- cq = c2_cq_get(c2dev, mq_index);
- if (!cq) {
- printk("discarding events on destroyed CQN=%d\n", mq_index);
- return;
- }
-
- (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- c2_cq_put(cq);
-}
-
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
-{
- struct c2_cq *cq;
- struct c2_mq *q;
-
- cq = c2_cq_get(c2dev, mq_index);
- if (!cq)
- return;
-
- spin_lock_irq(&cq->lock);
- q = &cq->mq;
- if (q && !c2_mq_empty(q)) {
- u16 priv = q->priv;
- struct c2wr_ce *msg;
-
- while (priv != be16_to_cpu(*q->shared)) {
- msg = (struct c2wr_ce *)
- (q->msg_pool.host + priv * q->msg_size);
- if (msg->qp_user_context == (u64) (unsigned long) qp) {
- msg->qp_user_context = (u64) 0;
- }
- priv = (priv + 1) % q->q_size;
- }
- }
- spin_unlock_irq(&cq->lock);
- c2_cq_put(cq);
-}
-
-static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
-{
- switch (status) {
- case C2_OK:
- return IB_WC_SUCCESS;
- case CCERR_FLUSHED:
- return IB_WC_WR_FLUSH_ERR;
- case CCERR_BASE_AND_BOUNDS_VIOLATION:
- return IB_WC_LOC_PROT_ERR;
- case CCERR_ACCESS_VIOLATION:
- return IB_WC_LOC_ACCESS_ERR;
- case CCERR_TOTAL_LENGTH_TOO_BIG:
- return IB_WC_LOC_LEN_ERR;
- case CCERR_INVALID_WINDOW:
- return IB_WC_MW_BIND_ERR;
- default:
- return IB_WC_GENERAL_ERR;
- }
-}
-
-
-static inline int c2_poll_one(struct c2_dev *c2dev,
- struct c2_cq *cq, struct ib_wc *entry)
-{
- struct c2wr_ce *ce;
- struct c2_qp *qp;
- int is_recv = 0;
-
- ce = c2_mq_consume(&cq->mq);
- if (!ce) {
- return -EAGAIN;
- }
-
- /*
- * if the qp returned is null then this qp has already
- * been freed and we are unable to process the completion.
- * try pulling the next message
- */
- while ((qp =
- (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
- c2_mq_free(&cq->mq);
- ce = c2_mq_consume(&cq->mq);
- if (!ce)
- return -EAGAIN;
- }
-
- entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
- entry->wr_id = ce->hdr.context;
- entry->qp = &qp->ibqp;
- entry->wc_flags = 0;
- entry->slid = 0;
- entry->sl = 0;
- entry->src_qp = 0;
- entry->dlid_path_bits = 0;
- entry->pkey_index = 0;
-
- switch (c2_wr_get_id(ce)) {
- case C2_WR_TYPE_SEND:
- entry->opcode = IB_WC_SEND;
- break;
- case C2_WR_TYPE_RDMA_WRITE:
- entry->opcode = IB_WC_RDMA_WRITE;
- break;
- case C2_WR_TYPE_RDMA_READ:
- entry->opcode = IB_WC_RDMA_READ;
- break;
- case C2_WR_TYPE_RECV:
- entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
- entry->opcode = IB_WC_RECV;
- is_recv = 1;
- break;
- default:
- break;
- }
-
- /* consume the WQEs */
- if (is_recv)
- c2_mq_lconsume(&qp->rq_mq, 1);
- else
- c2_mq_lconsume(&qp->sq_mq,
- be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
-
- /* free the message */
- c2_mq_free(&cq->mq);
-
- return 0;
-}
-
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct c2_dev *c2dev = to_c2dev(ibcq->device);
- struct c2_cq *cq = to_c2cq(ibcq);
- unsigned long flags;
- int npolled, err;
-
- spin_lock_irqsave(&cq->lock, flags);
-
- for (npolled = 0; npolled < num_entries; ++npolled) {
-
- err = c2_poll_one(c2dev, cq, entry + npolled);
- if (err)
- break;
- }
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- return npolled;
-}
-
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
- struct c2_mq_shared __iomem *shared;
- struct c2_cq *cq;
- unsigned long flags;
- int ret = 0;
-
- cq = to_c2cq(ibcq);
- shared = cq->mq.peer;
-
- if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
- writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
- else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
- else
- return -EINVAL;
-
- writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
-
- /*
- * Now read back shared->armed to make the PCI
- * write synchronous. This is necessary for
- * correct cq notification semantics.
- */
- readb(&shared->armed);
-
- if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
- spin_lock_irqsave(&cq->lock, flags);
- ret = !c2_mq_empty(&cq->mq);
- spin_unlock_irqrestore(&cq->lock, flags);
- }
-
- return ret;
-}
-
-static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
-{
- dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
- mq->msg_pool.host, dma_unmap_addr(mq, mapping));
-}
-
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
- size_t q_size, size_t msg_size)
-{
- u8 *pool_start;
-
- if (q_size > SIZE_MAX / msg_size)
- return -EINVAL;
-
- pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
- &mq->host_dma, GFP_KERNEL);
- if (!pool_start)
- return -ENOMEM;
-
- c2_mq_rep_init(mq,
- 0, /* index (currently unknown) */
- q_size,
- msg_size,
- pool_start,
- NULL, /* peer (currently unknown) */
- C2_MQ_HOST_TARGET);
-
- dma_unmap_addr_set(mq, mapping, mq->host_dma);
-
- return 0;
-}
-
-int c2_init_cq(struct c2_dev *c2dev, int entries,
- struct c2_ucontext *ctx, struct c2_cq *cq)
-{
- struct c2wr_cq_create_req wr;
- struct c2wr_cq_create_rep *reply;
- unsigned long peer_pa;
- struct c2_vq_req *vq_req;
- int err;
-
- might_sleep();
-
- cq->ibcq.cqe = entries - 1;
- cq->is_kernel = !ctx;
-
- /* Allocate a shared pointer */
- cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &cq->mq.shared_dma, GFP_KERNEL);
- if (!cq->mq.shared)
- return -ENOMEM;
-
- /* Allocate pages for the message pool */
- err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
- if (err)
- goto bail0;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- err = -ENOMEM;
- goto bail1;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_CQ_CREATE);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.msg_size = cpu_to_be32(cq->mq.msg_size);
- wr.depth = cpu_to_be32(cq->mq.q_size);
- wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
- wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
- wr.user_context = (u64) (unsigned long) (cq);
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail2;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail2;
-
- reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail2;
- }
-
- if ((err = c2_errno(reply)) != 0)
- goto bail3;
-
- cq->adapter_handle = reply->cq_handle;
- cq->mq.index = be32_to_cpu(reply->mq_index);
-
- peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
- cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
- if (!cq->mq.peer) {
- err = -ENOMEM;
- goto bail3;
- }
-
- vq_repbuf_free(c2dev, reply);
- vq_req_free(c2dev, vq_req);
-
- spin_lock_init(&cq->lock);
- atomic_set(&cq->refcount, 1);
- init_waitqueue_head(&cq->wait);
-
- /*
- * Use the MQ index allocated by the adapter to
- * store the CQ in the qptr_array
- */
- cq->cqn = cq->mq.index;
- c2dev->qptr_array[cq->cqn] = cq;
-
- return 0;
-
-bail3:
- vq_repbuf_free(c2dev, reply);
-bail2:
- vq_req_free(c2dev, vq_req);
-bail1:
- c2_free_cq_buf(c2dev, &cq->mq);
-bail0:
- c2_free_mqsp(cq->mq.shared);
-
- return err;
-}
-
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
-{
- int err;
- struct c2_vq_req *vq_req;
- struct c2wr_cq_destroy_req wr;
- struct c2wr_cq_destroy_rep *reply;
-
- might_sleep();
-
- /* Clear CQ from the qptr array */
- spin_lock_irq(&c2dev->lock);
- c2dev->qptr_array[cq->mq.index] = NULL;
- atomic_dec(&cq->refcount);
- spin_unlock_irq(&c2dev->lock);
-
- wait_event(cq->wait, !atomic_read(&cq->refcount));
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- goto bail0;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.cq_handle = cq->adapter_handle;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
- if (reply)
- vq_repbuf_free(c2dev, reply);
-bail1:
- vq_req_free(c2dev, vq_req);
-bail0:
- if (cq->is_kernel) {
- c2_free_cq_buf(c2dev, &cq->mq);
- }
-
- return;
-}
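
C2_CQ_MSG_SIZE above rounds the completion-entry size up to a 32-byte multiple, and c2_alloc_cq_buf() checks the q_size * msg_size multiplication against SIZE_MAX before allocating. A small sketch of both idioms, with hypothetical sizes:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same round-up idiom as C2_CQ_MSG_SIZE; 'align' must be a power of two. */
#define DEMO_ALIGN_UP(x, align)	(((x) + (align) - 1) & ~((size_t)(align) - 1))

int main(void)
{
	size_t msg_size = DEMO_ALIGN_UP(33, 32);	/* hypothetical payload size */
	size_t q_size = 1024;				/* hypothetical queue depth */

	/* Overflow guard before the multiplication, as c2_alloc_cq_buf() does. */
	if (q_size > SIZE_MAX / msg_size) {
		fprintf(stderr, "queue too large\n");
		return 1;
	}
	printf("msg_size=%zu pool=%zu bytes\n", msg_size, q_size * msg_size);
	return 0;
}
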
diff --git a/drivers/staging/rdma/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
deleted file mode 100644
index 74b32a971124..000000000000
--- a/drivers/staging/rdma/amso1100/c2_intr.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_vq.h"
-
-static void handle_mq(struct c2_dev *c2dev, u32 index);
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
-
-/*
- * Handle RNIC interrupts
- */
-void c2_rnic_interrupt(struct c2_dev *c2dev)
-{
- unsigned int mq_index;
-
- while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
- mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
- if (mq_index & 0x80000000) {
- break;
- }
-
- c2dev->hints_read++;
- handle_mq(c2dev, mq_index);
- }
-
-}
-
-/*
- * Top level MQ handler
- */
-static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
-{
- if (c2dev->qptr_array[mq_index] == NULL) {
- pr_debug("handle_mq: stray activity for mq_index=%d\n",
- mq_index);
- return;
- }
-
- switch (mq_index) {
- case (0):
- /*
- * An index of 0 in the activity queue
- * indicates the req vq now has messages
- * available...
- *
- * Wake up any waiters waiting on req VQ
- * message availability.
- */
- wake_up(&c2dev->req_vq_wo);
- break;
- case (1):
- handle_vq(c2dev, mq_index);
- break;
- case (2):
- /* We have to purge the VQ in case there are pending
- * accept reply requests that would result in the
- * generation of an ESTABLISHED event. If we don't
- * generate these first, a CLOSE event could end up
- * being delivered before the ESTABLISHED event.
- */
- handle_vq(c2dev, 1);
-
- c2_ae_event(c2dev, mq_index);
- break;
- default:
- /* There is no event synchronization between CQ events
- * and AE or CM events. In fact, CQE could be
- * delivered for all of the I/O up to and including the
- * FLUSH for a peer disconnect prior to the ESTABLISHED
- * event being delivered to the app. The reason for this
- * is that CM events are delivered on a thread, while AE
- * and CM events are delivered on interrupt context.
- */
- c2_cq_event(c2dev, mq_index);
- break;
- }
-
- return;
-}
-
-/*
- * Handles verbs WR replies.
- */
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
-{
- void *adapter_msg, *reply_msg;
- struct c2wr_hdr *host_msg;
- struct c2wr_hdr tmp;
- struct c2_mq *reply_vq;
- struct c2_vq_req *req;
- struct iw_cm_event cm_event;
- int err;
-
- reply_vq = c2dev->qptr_array[mq_index];
-
- /*
- * get next msg from mq_index into adapter_msg.
- * don't free it yet.
- */
- adapter_msg = c2_mq_consume(reply_vq);
- if (adapter_msg == NULL) {
- return;
- }
-
- host_msg = vq_repbuf_alloc(c2dev);
-
- /*
- * If we can't get a host buffer, then we'll still
- * wakeup the waiter, we just won't give him the msg.
- * It is assumed the waiter will deal with this...
- */
- if (!host_msg) {
- pr_debug("handle_vq: no repbufs!\n");
-
- /*
- * just copy the WR header into a local variable.
- * this allows us to still demux on the context
- */
- host_msg = &tmp;
- memcpy(host_msg, adapter_msg, sizeof(tmp));
- reply_msg = NULL;
- } else {
- memcpy(host_msg, adapter_msg, reply_vq->msg_size);
- reply_msg = host_msg;
- }
-
- /*
- * consume the msg from the MQ
- */
- c2_mq_free(reply_vq);
-
- /*
- * wakeup the waiter.
- */
- req = (struct c2_vq_req *) (unsigned long) host_msg->context;
- if (req == NULL) {
- /*
- * We should never get here, as the adapter should
- * never send us a reply that we're not expecting.
- */
- if (reply_msg != NULL)
- vq_repbuf_free(c2dev, host_msg);
- pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
- return;
- }
-
- if (reply_msg)
- err = c2_errno(reply_msg);
- else
- err = -ENOMEM;
-
- if (!err) switch (req->event) {
- case IW_CM_EVENT_ESTABLISHED:
- c2_set_qp_state(req->qp,
- C2_QP_STATE_RTS);
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send
- * max supported values
- */
- cm_event.ird = cm_event.ord = 128;
- case IW_CM_EVENT_CLOSE:
-
- /*
- * Move the QP to RTS if this is
- * the established event
- */
- cm_event.event = req->event;
- cm_event.status = 0;
- cm_event.local_addr = req->cm_id->local_addr;
- cm_event.remote_addr = req->cm_id->remote_addr;
- cm_event.private_data = NULL;
- cm_event.private_data_len = 0;
- req->cm_id->event_handler(req->cm_id, &cm_event);
- break;
- default:
- break;
- }
-
- req->reply_msg = (u64) (unsigned long) (reply_msg);
- atomic_set(&req->reply_ready, 1);
- wake_up(&req->wait_object);
-
- /*
- * If the request was cancelled, then this put will
- * free the vq_req memory...and reply_msg!!!
- */
- vq_req_put(c2dev, req);
-}
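
c2_rnic_interrupt() above drains hints until its local counter catches up with the adapter's shared count, stopping early when the hint register's top bit reports nothing pending. A userspace sketch of that loop; demo_read_hint() stands in for the PCI_BAR0_HOST_HINT read and every value is hypothetical:

#include <stdio.h>
#include <stdint.h>

static uint32_t pending[] = { 1, 2, 7 };
static unsigned int next_hint;

static uint32_t demo_read_hint(void)
{
	if (next_hint < sizeof(pending) / sizeof(pending[0]))
		return pending[next_hint++];
	return 0x80000000;			/* "nothing available" sentinel */
}

int main(void)
{
	uint16_t hints_read = 0, hint_count = 3;	/* adapter-side count (hypothetical) */

	while (hints_read != hint_count) {
		uint32_t mq_index = demo_read_hint();

		if (mq_index & 0x80000000)
			break;
		hints_read++;
		printf("handle mq %u\n", (unsigned)mq_index);
	}
	return 0;
}
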
diff --git a/drivers/staging/rdma/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
deleted file mode 100644
index 25081e2913de..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mm.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-
-#define PBL_VIRT 1
-#define PBL_PHYS 2
-
-/*
- * Send all the PBL messages to convey the remainder of the PBL
- * Wait for the adapter's reply on the last one.
- * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
- *
- * NOTE: vq_req is _not_ freed by this function. The VQ Host
- * Reply buffer _is_ freed by this function.
- */
-static int
-send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
- unsigned long va, u32 pbl_depth,
- struct c2_vq_req *vq_req, int pbl_type)
-{
- u32 pbe_count; /* amt that fits in a PBL msg */
- u32 count; /* amt in this PBL MSG. */
- struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */
- struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */
- int err, pbl_virt, pbl_index, i;
-
- switch (pbl_type) {
- case PBL_VIRT:
- pbl_virt = 1;
- break;
- case PBL_PHYS:
- pbl_virt = 0;
- break;
- default:
- return -EINVAL;
- break;
- }
-
- pbe_count = (c2dev->req_vq.msg_size -
- sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- return -ENOMEM;
- }
- c2_wr_set_id(wr, CCWR_NSMR_PBL);
-
- /*
- * Only the last PBL message will generate a reply from the verbs,
- * so we set the context to 0 indicating there is no kernel verbs
- * handler blocked awaiting this reply.
- */
- wr->hdr.context = 0;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->stag_index = stag_index; /* already swapped */
- wr->flags = 0;
- pbl_index = 0;
- while (pbl_depth) {
- count = min(pbe_count, pbl_depth);
- wr->addrs_length = cpu_to_be32(count);
-
- /*
- * If this is the last message, then reference the
- * vq request struct because we are going to wait for a reply.
- * Also mark this PBL msg as the last one.
- */
- if (count == pbl_depth) {
- /*
- * reference the request struct. dereferenced in the
- * int handler.
- */
- vq_req_get(c2dev, vq_req);
- wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
-
- /*
- * This is the last PBL message.
- * Set the context to our VQ Request Object so we can
- * wait for the reply.
- */
- wr->hdr.context = (unsigned long) vq_req;
- }
-
- /*
- * If pbl_virt is set then va is a virtual address
- * that describes a virtually contiguous memory
- * allocation. The wr needs the start of each virtual page
- * to be converted to the corresponding physical address
- * of the page. If pbl_virt is not set then va is an array
- * of physical addresses and there is no conversion to do.
- * Just fill in the wr with what is in the array.
- */
- for (i = 0; i < count; i++) {
- if (pbl_virt) {
- va += PAGE_SIZE;
- } else {
- wr->paddrs[i] =
- cpu_to_be64(((u64 *)va)[pbl_index + i]);
- }
- }
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- if (count <= pbe_count) {
- vq_req_put(c2dev, vq_req);
- }
- goto bail0;
- }
- pbl_depth -= count;
- pbl_index += count;
- }
-
- /*
- * Now wait for the reply...
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
-
- vq_repbuf_free(c2dev, reply);
-bail0:
- kfree(wr);
- return err;
-}
-
-#define C2_PBL_MAX_DEPTH 131072
-int
-c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
- int page_size, int pbl_depth, u32 length,
- u32 offset, u64 *va, enum c2_acf acf,
- struct c2_mr *mr)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_nsmr_register_req *wr;
- struct c2wr_nsmr_register_rep *reply;
- u16 flags;
- int i, pbe_count, count;
- int err;
-
- if (!va || !length || !addr_list || !pbl_depth)
- return -EINTR;
-
- /*
- * Verify PBL depth is within rnic max
- */
- if (pbl_depth > C2_PBL_MAX_DEPTH) {
- return -EINTR;
- }
-
- /*
- * allocate verbs request object
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- /*
- * build the WR
- */
- c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
-
- flags = (acf | MEM_VA_BASED | MEM_REMOTE);
-
- /*
- * compute how many pbes can fit in the message
- */
- pbe_count = (c2dev->req_vq.msg_size -
- sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
-
- if (pbl_depth <= pbe_count) {
- flags |= MEM_PBL_COMPLETE;
- }
- wr->flags = cpu_to_be16(flags);
- wr->stag_key = 0; //stag_key;
- wr->va = cpu_to_be64(*va);
- wr->pd_id = mr->pd->pd_id;
- wr->pbe_size = cpu_to_be32(page_size);
- wr->length = cpu_to_be32(length);
- wr->pbl_depth = cpu_to_be32(pbl_depth);
- wr->fbo = cpu_to_be32(offset);
- count = min(pbl_depth, pbe_count);
- wr->addrs_length = cpu_to_be32(count);
-
- /*
- * fill out the PBL for this message
- */
- for (i = 0; i < count; i++) {
- wr->paddrs[i] = cpu_to_be64(addr_list[i]);
- }
-
- /*
- * reference the request struct
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * send the WR to the adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- /*
- * wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail1;
- }
-
- /*
- * process reply
- */
- reply =
- (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
- if ((err = c2_errno(reply))) {
- goto bail2;
- }
- //*p_pb_entries = be32_to_cpu(reply->pbl_depth);
- mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
- vq_repbuf_free(c2dev, reply);
-
- /*
- * if there are still more PBEs we need to send them to
- * the adapter and wait for a reply on the final one.
- * reuse vq_req for this purpose.
- */
- pbl_depth -= count;
- if (pbl_depth) {
-
- vq_req->reply_msg = (unsigned long) NULL;
- atomic_set(&vq_req->reply_ready, 0);
- err = send_pbl_messages(c2dev,
- cpu_to_be32(mr->ibmr.lkey),
- (unsigned long) &addr_list[i],
- pbl_depth, vq_req, PBL_PHYS);
- if (err) {
- goto bail1;
- }
- }
-
- vq_req_free(c2dev, vq_req);
- kfree(wr);
-
- return err;
-
-bail2:
- vq_repbuf_free(c2dev, reply);
-bail1:
- kfree(wr);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
-{
- struct c2_vq_req *vq_req; /* verbs request object */
- struct c2wr_stag_dealloc_req wr; /* work request */
- struct c2wr_stag_dealloc_rep *reply; /* WR reply */
- int err;
-
-
- /*
- * allocate verbs request object
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- return -ENOMEM;
- }
-
- /*
- * Build the WR
- */
- c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
- wr.hdr.context = (u64) (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.stag_index = cpu_to_be32(stag_index);
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
-
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
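
A note on the PBL split performed by c2_nsmr_register_phys_kern() above: the first CCWR_NSMR_REGISTER message carries as many page-buffer entries as fit in one verbs-queue message, MEM_PBL_COMPLETE is set only when the whole list fits, and any remainder is streamed afterwards through send_pbl_messages(). A stand-alone restatement of that arithmetic with plain integers (illustrative only; the real message size comes from the adapter, and hdr_size stands in for sizeof(struct c2wr_nsmr_register_req)):

#include <stddef.h>

/* Hypothetical helper: how many PBEs ride in the first register WR,
 * and whether that WR may claim MEM_PBL_COMPLETE. */
static int first_wr_pbe_count(size_t msg_size, size_t hdr_size,
			      int pbl_depth, int *complete)
{
	int pbe_count = (msg_size - hdr_size) / sizeof(unsigned long long);
	int count = pbl_depth < pbe_count ? pbl_depth : pbe_count;

	*complete = (pbl_depth <= pbe_count);	/* whole PBL fits in one WR */
	return count;		/* leftover entries go via send_pbl_messages() */
}
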
diff --git a/drivers/staging/rdma/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
deleted file mode 100644
index 7827fb8bdb10..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mq.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include "c2_mq.h"
-
-void *c2_mq_alloc(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
- if (c2_mq_full(q)) {
- return NULL;
- } else {
-#ifdef DEBUG
- struct c2wr_hdr *m =
- (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
- BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
- m->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
- return m;
-#else
- return q->msg_pool.host + q->priv * q->msg_size;
-#endif
- }
-}
-
-void c2_mq_produce(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
- if (!c2_mq_full(q)) {
- q->priv = (q->priv + 1) % q->q_size;
- q->hint_count++;
- /* Update peer's offset. */
- __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
- }
-}
-
-void *c2_mq_consume(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
- if (c2_mq_empty(q)) {
- return NULL;
- } else {
-#ifdef DEBUG
- struct c2wr_hdr *m = (struct c2wr_hdr *)
- (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
- BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
-#endif
- return m;
-#else
- return q->msg_pool.host + q->priv * q->msg_size;
-#endif
- }
-}
-
-void c2_mq_free(struct c2_mq *q)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
- if (!c2_mq_empty(q)) {
-
-#ifdef CCMSGMAGIC
- {
- struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
- (q->msg_pool.adapter + q->priv * q->msg_size);
- __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
- }
-#endif
- q->priv = (q->priv + 1) % q->q_size;
- /* Update peer's offset. */
- __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
- }
-}
-
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
-{
- BUG_ON(q->magic != C2_MQ_MAGIC);
- BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
- while (wqe_count--) {
- BUG_ON(c2_mq_empty(q));
- *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
- }
-}
-
-#if 0
-u32 c2_mq_count(struct c2_mq *q)
-{
- s32 count;
-
- if (q->type == C2_MQ_HOST_TARGET)
- count = be16_to_cpu(*q->shared) - q->priv;
- else
- count = q->priv - be16_to_cpu(*q->shared);
-
- if (count < 0)
- count += q->q_size;
-
- return (u32) count;
-}
-#endif /* 0 */
-
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
-{
- BUG_ON(!q->shared);
-
- /* This code assumes the byte swapping has already been done! */
- q->index = index;
- q->q_size = q_size;
- q->msg_size = msg_size;
- q->msg_pool.adapter = pool_start;
- q->peer = (struct c2_mq_shared __iomem *) peer;
- q->magic = C2_MQ_MAGIC;
- q->type = type;
- q->priv = 0;
- q->hint_count = 0;
- return;
-}
-
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 *pool_start, u16 __iomem *peer, u32 type)
-{
- BUG_ON(!q->shared);
-
- /* This code assumes the byte swapping has already been done! */
- q->index = index;
- q->q_size = q_size;
- q->msg_size = msg_size;
- q->msg_pool.host = pool_start;
- q->peer = (struct c2_mq_shared __iomem *) peer;
- q->magic = C2_MQ_MAGIC;
- q->type = type;
- q->priv = 0;
- q->hint_count = 0;
- return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
deleted file mode 100644
index 8e1b4d13409e..000000000000
--- a/drivers/staging/rdma/amso1100/c2_mq.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _C2_MQ_H_
-#define _C2_MQ_H_
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include "c2_wr.h"
-
-enum c2_shared_regs {
-
- C2_SHARED_ARMED = 0x10,
- C2_SHARED_NOTIFY = 0x18,
- C2_SHARED_SHARED = 0x40,
-};
-
-struct c2_mq_shared {
- u16 unused1;
- u8 armed;
- u8 notification_type;
- u32 unused2;
- u16 shared;
- /* Pad to 64 bytes. */
- u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
-};
-
-enum c2_mq_type {
- C2_MQ_HOST_TARGET = 1,
- C2_MQ_ADAPTER_TARGET = 2,
-};
-
-/*
- * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
- * c2_user_mq_t (which is the same format) is for user-mode MQs...
- */
-#define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */
-struct c2_mq {
- u32 magic;
- union {
- u8 *host;
- u8 __iomem *adapter;
- } msg_pool;
- dma_addr_t host_dma;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- u16 hint_count;
- u16 priv;
- struct c2_mq_shared __iomem *peer;
- __be16 *shared;
- dma_addr_t shared_dma;
- u32 q_size;
- u32 msg_size;
- u32 index;
- enum c2_mq_type type;
-};
-
-static __inline__ int c2_mq_empty(struct c2_mq *q)
-{
- return q->priv == be16_to_cpu(*q->shared);
-}
-
-static __inline__ int c2_mq_full(struct c2_mq *q)
-{
- return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
-}
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
-void *c2_mq_alloc(struct c2_mq *q);
-void c2_mq_produce(struct c2_mq *q);
-void *c2_mq_consume(struct c2_mq *q);
-void c2_mq_free(struct c2_mq *q);
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
- u8 *pool_start, u16 __iomem *peer, u32 type);
-
-#endif /* _C2_MQ_H_ */
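
The c2_mq_empty()/c2_mq_full() helpers above implement a ring in which one slot is always left unused: the queue is empty when the producer index (priv) equals the consumer index (*shared), and full when the producer is one slot behind the consumer, since filling that last slot would make the two indices equal and the queue would look empty again. A stand-alone sketch of the same predicates, with the struct fields replaced by plain integers (illustrative only; the driver version also handles the byte swapping of *shared):

#include <stdbool.h>
#include <stdint.h>

/* Same tests as c2_mq_empty()/c2_mq_full(), minus the struct c2_mq
 * plumbing.  With q_size = 8, at most 7 messages can be outstanding. */
static bool mq_empty(uint16_t priv, uint16_t shared)
{
	return priv == shared;
}

static bool mq_full(uint16_t priv, uint16_t shared, uint32_t q_size)
{
	return priv == (shared + q_size - 1) % q_size;
}
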
diff --git a/drivers/staging/rdma/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
deleted file mode 100644
index f3e81dc357bb..000000000000
--- a/drivers/staging/rdma/amso1100/c2_pd.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-
-#include "c2.h"
-#include "c2_provider.h"
-
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
-{
- u32 obj;
- int ret = 0;
-
- spin_lock(&c2dev->pd_table.lock);
- obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
- c2dev->pd_table.last);
- if (obj >= c2dev->pd_table.max)
- obj = find_first_zero_bit(c2dev->pd_table.table,
- c2dev->pd_table.max);
- if (obj < c2dev->pd_table.max) {
- pd->pd_id = obj;
- __set_bit(obj, c2dev->pd_table.table);
- c2dev->pd_table.last = obj+1;
- if (c2dev->pd_table.last >= c2dev->pd_table.max)
- c2dev->pd_table.last = 0;
- } else
- ret = -ENOMEM;
- spin_unlock(&c2dev->pd_table.lock);
- return ret;
-}
-
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
-{
- spin_lock(&c2dev->pd_table.lock);
- __clear_bit(pd->pd_id, c2dev->pd_table.table);
- spin_unlock(&c2dev->pd_table.lock);
-}
-
-int c2_init_pd_table(struct c2_dev *c2dev)
-{
-
- c2dev->pd_table.last = 0;
- c2dev->pd_table.max = c2dev->props.max_pd;
- spin_lock_init(&c2dev->pd_table.lock);
- c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
- sizeof(long), GFP_KERNEL);
- if (!c2dev->pd_table.table)
- return -ENOMEM;
- bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
- return 0;
-}
-
-void c2_cleanup_pd_table(struct c2_dev *c2dev)
-{
- kfree(c2dev->pd_table.table);
-}
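
c2_pd_alloc() above hands out PD ids round-robin from a bitmap: the scan resumes at pd_table.last, wraps back to bit 0 when it runs off the end, and only fails with -ENOMEM once every bit is set. A simplified single-word version of the same scan (illustrative only; the driver uses find_next_zero_bit() over a multi-word bitmap while holding pd_table.lock):

#include <stdint.h>

/* Round-robin id allocator over one 32-bit word (so max <= 32).
 * Returns the allocated id, or -1 when the table is exhausted. */
static int alloc_id(uint32_t *bitmap, unsigned int max, unsigned int *last)
{
	unsigned int i, obj;

	for (i = 0; i < max; i++) {
		obj = (*last + i) % max;		/* resume after the last grant */
		if (!(*bitmap & (1u << obj))) {
			*bitmap |= 1u << obj;		/* claim it */
			*last = (obj + 1) % max;	/* next search starts here */
			return obj;
		}
	}
	return -1;					/* maps to -ENOMEM in the driver */
}
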
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
deleted file mode 100644
index de8d10e1bde3..000000000000
--- a/drivers/staging/rdma/amso1100/c2_provider.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/if_arp.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include "c2.h"
-#include "c2_provider.h"
-#include "c2_user.h"
-
-static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct c2_dev *c2dev = to_c2dev(ibdev);
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- *props = c2dev->props;
- return 0;
-}
-
-static int c2_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- props->max_mtu = IB_MTU_4096;
- props->lid = 0;
- props->lmc = 0;
- props->sm_lid = 0;
- props->sm_sl = 0;
- props->state = IB_PORT_ACTIVE;
- props->phys_state = 0;
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->qkey_viol_cntr = 0;
- props->active_width = 1;
- props->active_speed = IB_SPEED_SDR;
-
- return 0;
-}
-
-static int c2_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 * pkey)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- *pkey = 0;
- return 0;
-}
-
-static int c2_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct c2_dev *c2dev = to_c2dev(ibdev);
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
-
- return 0;
-}
-
-/* Allocate the user context data structure. This keeps track
- * of all objects associated with a particular user-mode client.
- */
-static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct c2_ucontext *context;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
- return &context->ibucontext;
-}
-
-static int c2_dealloc_ucontext(struct ib_ucontext *context)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- kfree(context);
- return 0;
-}
-
-static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct c2_pd *pd;
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
-
- if (context) {
- if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
- c2_pd_free(to_c2dev(ibdev), pd);
- kfree(pd);
- return ERR_PTR(-EFAULT);
- }
- }
-
- return &pd->ibpd;
-}
-
-static int c2_dealloc_pd(struct ib_pd *pd)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
- kfree(pd);
-
- return 0;
-}
-
-static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return ERR_PTR(-ENOSYS);
-}
-
-static int c2_ah_destroy(struct ib_ah *ah)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static void c2_add_ref(struct ib_qp *ibqp)
-{
- struct c2_qp *qp;
- BUG_ON(!ibqp);
- qp = to_c2qp(ibqp);
- atomic_inc(&qp->refcount);
-}
-
-static void c2_rem_ref(struct ib_qp *ibqp)
-{
- struct c2_qp *qp;
- BUG_ON(!ibqp);
- qp = to_c2qp(ibqp);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
-{
- struct c2_dev* c2dev = to_c2dev(device);
- struct c2_qp *qp;
-
- qp = c2_find_qpn(c2dev, qpn);
- pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
- __func__, qp, qpn, device,
- (qp?atomic_read(&qp->refcount):0));
-
- return (qp?&qp->ibqp:NULL);
-}
-
-static struct ib_qp *c2_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
-{
- struct c2_qp *qp;
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
-
- switch (init_attr->qp_type) {
- case IB_QPT_RC:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- pr_debug("%s: Unable to allocate QP\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
- spin_lock_init(&qp->lock);
- if (pd->uobject) {
- /* userspace specific */
- }
-
- err = c2_alloc_qp(to_c2dev(pd->device),
- to_c2pd(pd), init_attr, qp);
-
- if (err && pd->uobject) {
- /* userspace specific */
- }
-
- break;
- default:
- pr_debug("%s: Invalid QP type: %d\n", __func__,
- init_attr->qp_type);
- return ERR_PTR(-EINVAL);
- }
-
- if (err) {
- kfree(qp);
- return ERR_PTR(err);
- }
-
- return &qp->ibqp;
-}
-
-static int c2_destroy_qp(struct ib_qp *ib_qp)
-{
- struct c2_qp *qp = to_c2qp(ib_qp);
-
- pr_debug("%s:%u qp=%p,qp->state=%d\n",
- __func__, __LINE__, ib_qp, qp->state);
- c2_free_qp(to_c2dev(ib_qp->device), qp);
- kfree(qp);
- return 0;
-}
-
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct c2_cq *cq;
- int err;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- pr_debug("%s: Unable to allocate CQ\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
- if (err) {
- pr_debug("%s: error initializing CQ\n", __func__);
- kfree(cq);
- return ERR_PTR(err);
- }
-
- return &cq->ibcq;
-}
-
-static int c2_destroy_cq(struct ib_cq *ib_cq)
-{
- struct c2_cq *cq = to_c2cq(ib_cq);
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- c2_free_cq(to_c2dev(ib_cq->device), cq);
- kfree(cq);
-
- return 0;
-}
-
-static inline u32 c2_convert_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
- C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
-}
-
-static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct c2_mr *mr;
- u64 *page_list;
- const u32 total_len = 0xffffffff; /* AMSO1100 limit */
- int err, page_shift, pbl_depth, i;
- u64 kva = 0;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /*
- * This is a map of all physical memory; use a 32KB page shift.
- */
- page_shift = PAGE_SHIFT + 3;
- pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
-
- page_list = vmalloc(sizeof(u64) * pbl_depth);
- if (!page_list) {
- pr_debug("couldn't vmalloc page_list of size %zd\n",
- (sizeof(u64) * pbl_depth));
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0; i < pbl_depth; i++)
- page_list[i] = (i << page_shift);
-
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- vfree(page_list);
- return ERR_PTR(-ENOMEM);
- }
-
- mr->pd = to_c2pd(pd);
- mr->umem = NULL;
- pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
- "*iova_start %llx, first pa %llx, last pa %llx\n",
- __func__, page_shift, pbl_depth, total_len,
- (unsigned long long) kva,
- (unsigned long long) page_list[0],
- (unsigned long long) page_list[pbl_depth-1]);
- err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
- BIT(page_shift), pbl_depth,
- total_len, 0, &kva,
- c2_convert_access(acc), mr);
- vfree(page_list);
- if (err) {
- kfree(mr);
- return ERR_PTR(err);
- }
-
- return &mr->ibmr;
-}
-
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- u64 *pages;
- u64 kva = 0;
- int shift, n, len;
- int i, k, entry;
- int err = 0;
- struct scatterlist *sg;
- struct c2_pd *c2pd = to_c2pd(pd);
- struct c2_mr *c2mr;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
- if (!c2mr)
- return ERR_PTR(-ENOMEM);
- c2mr->pd = c2pd;
-
- c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(c2mr->umem)) {
- err = PTR_ERR(c2mr->umem);
- kfree(c2mr);
- return ERR_PTR(err);
- }
-
- shift = ffs(c2mr->umem->page_size) - 1;
- n = c2mr->umem->nmap;
-
- pages = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err;
- }
-
- i = 0;
- for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
- len = sg_dma_len(sg) >> shift;
- for (k = 0; k < len; ++k) {
- pages[i++] =
- sg_dma_address(sg) +
- (c2mr->umem->page_size * k);
- }
- }
-
- kva = virt;
- err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
- pages,
- c2mr->umem->page_size,
- i,
- length,
- ib_umem_offset(c2mr->umem),
- &kva,
- c2_convert_access(acc),
- c2mr);
- kfree(pages);
- if (err)
- goto err;
- return &c2mr->ibmr;
-
-err:
- ib_umem_release(c2mr->umem);
- kfree(c2mr);
- return ERR_PTR(err);
-}
-
-static int c2_dereg_mr(struct ib_mr *ib_mr)
-{
- struct c2_mr *mr = to_c2mr(ib_mr);
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
- if (err)
- pr_debug("c2_stag_dealloc failed: %d\n", err);
- else {
- if (mr->umem)
- ib_umem_release(mr->umem);
- kfree(mr);
- }
-
- return err;
-}
-
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "%x\n", c2dev->props.hw_ver);
-}
-
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "%x.%x.%x\n",
- (int) (c2dev->props.fw_ver >> 32),
- (int) (c2dev->props.fw_ver >> 16) & 0xffff,
- (int) (c2dev->props.fw_ver & 0xffff));
-}
-
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "AMSO1100\n");
-}
-
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *c2_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_fw_ver,
- &dev_attr_hca_type,
- &dev_attr_board_id
-};
-
-static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- int err;
-
- err =
- c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
- attr_mask);
-
- return err;
-}
-
-static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static int c2_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return -ENOSYS;
-}
-
-static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /* Request a connection */
- return c2_llp_connect(cm_id, iw_param);
-}
-
-static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /* Accept the new connection */
- return c2_llp_accept(cm_id, iw_param);
-}
-
-static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- return c2_llp_reject(cm_id, pdata, pdata_len);
-}
-
-static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
-{
- int err;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- err = c2_llp_service_create(cm_id, backlog);
- pr_debug("%s:%u err=%d\n",
- __func__, __LINE__,
- err);
- return err;
-}
-
-static int c2_service_destroy(struct iw_cm_id *cm_id)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- return c2_llp_service_destroy(cm_id);
-}
-
-static int c2_pseudo_up(struct net_device *netdev)
-{
- struct in_device *ind;
- struct c2_dev *c2dev = netdev->ml_priv;
-
- ind = in_dev_get(netdev);
- if (!ind)
- return 0;
-
- pr_debug("adding...\n");
- for_ifa(ind) {
-#ifdef DEBUG
- u8 *ip = (u8 *) & ifa->ifa_address;
-
- pr_debug("%s: %d.%d.%d.%d\n",
- ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
- c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
- }
- endfor_ifa(ind);
- in_dev_put(ind);
-
- return 0;
-}
-
-static int c2_pseudo_down(struct net_device *netdev)
-{
- struct in_device *ind;
- struct c2_dev *c2dev = netdev->ml_priv;
-
- ind = in_dev_get(netdev);
- if (!ind)
- return 0;
-
- pr_debug("deleting...\n");
- for_ifa(ind) {
-#ifdef DEBUG
- u8 *ip = (u8 *) & ifa->ifa_address;
-
- pr_debug("%s: %d.%d.%d.%d\n",
- ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
- c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
- }
- endfor_ifa(ind);
- in_dev_put(ind);
-
- return 0;
-}
-
-static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
-{
- if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
- return -EINVAL;
-
- netdev->mtu = new_mtu;
-
- /* TODO: Tell rnic about new rmda interface mtu */
- return 0;
-}
-
-static const struct net_device_ops c2_pseudo_netdev_ops = {
- .ndo_open = c2_pseudo_up,
- .ndo_stop = c2_pseudo_down,
- .ndo_start_xmit = c2_pseudo_xmit_frame,
- .ndo_change_mtu = c2_pseudo_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void setup(struct net_device *netdev)
-{
- netdev->netdev_ops = &c2_pseudo_netdev_ops;
-
- netdev->watchdog_timeo = 0;
- netdev->type = ARPHRD_ETHER;
- netdev->mtu = 1500;
- netdev->hard_header_len = ETH_HLEN;
- netdev->addr_len = ETH_ALEN;
- netdev->tx_queue_len = 0;
- netdev->flags |= IFF_NOARP;
-}
-
-static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
-{
- char name[IFNAMSIZ];
- struct net_device *netdev;
-
- /* change ethxxx to iwxxx */
- strcpy(name, "iw");
- strcat(name, &c2dev->netdev->name[3]);
- netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
- if (!netdev) {
- printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
- __func__);
- return NULL;
- }
-
- netdev->ml_priv = c2dev;
-
- SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
- memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
-
- /* Print out the MAC address */
- pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
-
-#if 0
- /* Disable network packets */
- netif_stop_queue(netdev);
-#endif
- return netdev;
-}
-
-static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = c2_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- return 0;
-}
-
-int c2_register_device(struct c2_dev *dev)
-{
- int ret = -ENOMEM;
- int i;
-
- /* Register pseudo network device */
- dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
- if (!dev->pseudo_netdev)
- goto out;
-
- ret = register_netdev(dev->pseudo_netdev);
- if (ret)
- goto out_free_netdev;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
- strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
- dev->ibdev.owner = THIS_MODULE;
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
-
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
- dev->ibdev.phys_port_cnt = 1;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dma_device = &dev->pcidev->dev;
- dev->ibdev.query_device = c2_query_device;
- dev->ibdev.query_port = c2_query_port;
- dev->ibdev.query_pkey = c2_query_pkey;
- dev->ibdev.query_gid = c2_query_gid;
- dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
- dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
- dev->ibdev.mmap = c2_mmap_uar;
- dev->ibdev.alloc_pd = c2_alloc_pd;
- dev->ibdev.dealloc_pd = c2_dealloc_pd;
- dev->ibdev.create_ah = c2_ah_create;
- dev->ibdev.destroy_ah = c2_ah_destroy;
- dev->ibdev.create_qp = c2_create_qp;
- dev->ibdev.modify_qp = c2_modify_qp;
- dev->ibdev.destroy_qp = c2_destroy_qp;
- dev->ibdev.create_cq = c2_create_cq;
- dev->ibdev.destroy_cq = c2_destroy_cq;
- dev->ibdev.poll_cq = c2_poll_cq;
- dev->ibdev.get_dma_mr = c2_get_dma_mr;
- dev->ibdev.reg_user_mr = c2_reg_user_mr;
- dev->ibdev.dereg_mr = c2_dereg_mr;
- dev->ibdev.get_port_immutable = c2_port_immutable;
-
- dev->ibdev.alloc_fmr = NULL;
- dev->ibdev.unmap_fmr = NULL;
- dev->ibdev.dealloc_fmr = NULL;
- dev->ibdev.map_phys_fmr = NULL;
-
- dev->ibdev.attach_mcast = c2_multicast_attach;
- dev->ibdev.detach_mcast = c2_multicast_detach;
- dev->ibdev.process_mad = c2_process_mad;
-
- dev->ibdev.req_notify_cq = c2_arm_cq;
- dev->ibdev.post_send = c2_post_send;
- dev->ibdev.post_recv = c2_post_receive;
-
- dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
- if (dev->ibdev.iwcm == NULL) {
- ret = -ENOMEM;
- goto out_unregister_netdev;
- }
- dev->ibdev.iwcm->add_ref = c2_add_ref;
- dev->ibdev.iwcm->rem_ref = c2_rem_ref;
- dev->ibdev.iwcm->get_qp = c2_get_qp;
- dev->ibdev.iwcm->connect = c2_connect;
- dev->ibdev.iwcm->accept = c2_accept;
- dev->ibdev.iwcm->reject = c2_reject;
- dev->ibdev.iwcm->create_listen = c2_service_create;
- dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
-
- ret = ib_register_device(&dev->ibdev, NULL);
- if (ret)
- goto out_free_iwcm;
-
- for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- c2_dev_attributes[i]);
- if (ret)
- goto out_unregister_ibdev;
- }
- goto out;
-
-out_unregister_ibdev:
- ib_unregister_device(&dev->ibdev);
-out_free_iwcm:
- kfree(dev->ibdev.iwcm);
-out_unregister_netdev:
- unregister_netdev(dev->pseudo_netdev);
-out_free_netdev:
- free_netdev(dev->pseudo_netdev);
-out:
- pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
- return ret;
-}
-
-void c2_unregister_device(struct c2_dev *dev)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- unregister_netdev(dev->pseudo_netdev);
- free_netdev(dev->pseudo_netdev);
- ib_unregister_device(&dev->ibdev);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
deleted file mode 100644
index bf189987711f..000000000000
--- a/drivers/staging/rdma/amso1100/c2_provider.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_PROVIDER_H
-#define C2_PROVIDER_H
-#include <linux/inetdevice.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-
-#include "c2_mq.h"
-#include <rdma/iw_cm.h>
-
-#define C2_MPT_FLAG_ATOMIC (1 << 14)
-#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13)
-#define C2_MPT_FLAG_REMOTE_READ (1 << 12)
-#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11)
-#define C2_MPT_FLAG_LOCAL_READ (1 << 10)
-
-struct c2_buf_list {
- void *buf;
- DEFINE_DMA_UNMAP_ADDR(mapping);
-};
-
-
-/* The user context keeps track of objects allocated for a
- * particular user-mode client. */
-struct c2_ucontext {
- struct ib_ucontext ibucontext;
-};
-
-struct c2_mtt;
-
-/* All objects associated with a PD are kept in the
- * associated user context if present.
- */
-struct c2_pd {
- struct ib_pd ibpd;
- u32 pd_id;
-};
-
-struct c2_mr {
- struct ib_mr ibmr;
- struct c2_pd *pd;
- struct ib_umem *umem;
-};
-
-struct c2_av;
-
-enum c2_ah_type {
- C2_AH_ON_HCA,
- C2_AH_PCI_POOL,
- C2_AH_KMALLOC
-};
-
-struct c2_ah {
- struct ib_ah ibah;
-};
-
-struct c2_cq {
- struct ib_cq ibcq;
- spinlock_t lock;
- atomic_t refcount;
- int cqn;
- int is_kernel;
- wait_queue_head_t wait;
-
- u32 adapter_handle;
- struct c2_mq mq;
-};
-
-struct c2_wq {
- spinlock_t lock;
-};
-struct iw_cm_id;
-struct c2_qp {
- struct ib_qp ibqp;
- struct iw_cm_id *cm_id;
- spinlock_t lock;
- atomic_t refcount;
- wait_queue_head_t wait;
- int qpn;
-
- u32 adapter_handle;
- u32 send_sgl_depth;
- u32 recv_sgl_depth;
- u32 rdma_write_sgl_depth;
- u8 state;
-
- struct c2_mq sq_mq;
- struct c2_mq rq_mq;
-};
-
-struct c2_cr_query_attrs {
- u32 local_addr;
- u32 remote_addr;
- u16 local_port;
- u16 remote_port;
-};
-
-static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct c2_pd, ibpd);
-}
-
-static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
-{
- return container_of(ibucontext, struct c2_ucontext, ibucontext);
-}
-
-static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct c2_mr, ibmr);
-}
-
-
-static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct c2_ah, ibah);
-}
-
-static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct c2_cq, ibcq);
-}
-
-static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct c2_qp, ibqp);
-}
-
-static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
-{
- struct in_device *ind;
- int ret = 0;
-
- ind = in_dev_get(netdev);
- if (!ind)
- return 0;
-
- for_ifa(ind) {
- if (ifa->ifa_address == addr) {
- ret = 1;
- break;
- }
- }
- endfor_ifa(ind);
- in_dev_put(ind);
- return ret;
-}
-#endif /* C2_PROVIDER_H */
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
deleted file mode 100644
index ca364dbe369c..000000000000
--- a/drivers/staging/rdma/amso1100/c2_qp.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_MAX_ORD_PER_QP 128
-#define C2_MAX_IRD_PER_QP 128
-
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-#define NO_SUPPORT -1
-static const u8 c2_opcode[] = {
- [IB_WR_SEND] = C2_WR_TYPE_SEND,
- [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
- [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
- [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
-};
-
-static int to_c2_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- return C2_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return C2_QP_STATE_RTS;
- case IB_QPS_SQD:
- return C2_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return C2_QP_STATE_CLOSING;
- case IB_QPS_ERR:
- return C2_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static int to_ib_state(enum c2_qp_state c2_state)
-{
- switch (c2_state) {
- case C2_QP_STATE_IDLE:
- return IB_QPS_RESET;
- case C2_QP_STATE_CONNECTING:
- return IB_QPS_RTR;
- case C2_QP_STATE_RTS:
- return IB_QPS_RTS;
- case C2_QP_STATE_CLOSING:
- return IB_QPS_SQD;
- case C2_QP_STATE_ERROR:
- return IB_QPS_ERR;
- case C2_QP_STATE_TERMINATE:
- return IB_QPS_SQE;
- default:
- return -1;
- }
-}
-
-static const char *to_ib_state_str(int ib_state)
-{
- static const char *state_str[] = {
- "IB_QPS_RESET",
- "IB_QPS_INIT",
- "IB_QPS_RTR",
- "IB_QPS_RTS",
- "IB_QPS_SQD",
- "IB_QPS_SQE",
- "IB_QPS_ERR"
- };
- if (ib_state < IB_QPS_RESET ||
- ib_state > IB_QPS_ERR)
- return "<invalid IB QP state>";
-
- ib_state -= IB_QPS_RESET;
- return state_str[ib_state];
-}
-
-void c2_set_qp_state(struct c2_qp *qp, int c2_state)
-{
- int new_state = to_ib_state(c2_state);
-
- pr_debug("%s: qp[%p] state modify %s --> %s\n",
- __func__,
- qp,
- to_ib_state_str(qp->state),
- to_ib_state_str(new_state));
- qp->state = new_state;
-}
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
- struct ib_qp_attr *attr, int attr_mask)
-{
- struct c2wr_qp_modify_req wr;
- struct c2wr_qp_modify_rep *reply;
- struct c2_vq_req *vq_req;
- unsigned long flags;
- u8 next_state;
- int err;
-
- pr_debug("%s:%d qp=%p, %s --> %s\n",
- __func__, __LINE__,
- qp,
- to_ib_state_str(qp->state),
- to_ib_state_str(attr->qp_state));
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- c2_wr_set_id(&wr, CCWR_QP_MODIFY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.qp_handle = qp->adapter_handle;
- wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
- if (attr_mask & IB_QP_STATE) {
- /* Ensure the state is valid */
- if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
- err = -EINVAL;
- goto bail0;
- }
-
- wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
-
- if (attr->qp_state == IB_QPS_ERR) {
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id && qp->state == IB_QPS_RTS) {
- pr_debug("Generating CLOSE event for QP-->ERR, "
- "qp=%p, cm_id=%p\n",qp,qp->cm_id);
- /* Generate a CLOSE event */
- vq_req->cm_id = qp->cm_id;
- vq_req->event = IW_CM_EVENT_CLOSE;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
- }
- next_state = attr->qp_state;
-
- } else if (attr_mask & IB_QP_CUR_STATE) {
-
- if (attr->cur_qp_state != IB_QPS_RTR &&
- attr->cur_qp_state != IB_QPS_RTS &&
- attr->cur_qp_state != IB_QPS_SQD &&
- attr->cur_qp_state != IB_QPS_SQE) {
- err = -EINVAL;
- goto bail0;
- } else
- wr.next_qp_state =
- cpu_to_be32(to_c2_state(attr->cur_qp_state));
-
- next_state = attr->cur_qp_state;
-
- } else {
- err = 0;
- goto bail0;
- }
-
- /* reference the request struct */
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
- if (!err)
- qp->state = next_state;
-#ifdef DEBUG
- else
- pr_debug("%s: c2_errno=%d\n", __func__, err);
-#endif
- /*
- * If we are moving the QP to error and generating the CLOSE event here,
- * then we need to remove the reference because there will be no
- * close event generated by the adapter.
- */
- spin_lock_irqsave(&qp->lock, flags);
- if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
-
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
-
- pr_debug("%s:%d qp=%p, cur_state=%s\n",
- __func__, __LINE__,
- qp,
- to_ib_state_str(qp->state));
- return err;
-}
-
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
- int ord, int ird)
-{
- struct c2wr_qp_modify_req wr;
- struct c2wr_qp_modify_rep *reply;
- struct c2_vq_req *vq_req;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- c2_wr_set_id(&wr, CCWR_QP_MODIFY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.qp_handle = qp->adapter_handle;
- wr.ord = cpu_to_be32(ord);
- wr.ird = cpu_to_be32(ird);
- wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
- wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
- /* reference the request struct */
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail0;
-
- reply = (struct c2wr_qp_modify_rep *) (unsigned long)
- vq_req->reply_msg;
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_qp_destroy_req wr;
- struct c2wr_qp_destroy_rep *reply;
- unsigned long flags;
- int err;
-
- /*
- * Allocate a verb request message
- */
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req) {
- return -ENOMEM;
- }
-
- /*
- * Initialize the WR
- */
- c2_wr_set_id(&wr, CCWR_QP_DESTROY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.qp_handle = qp->adapter_handle;
-
- /*
- * reference the request struct. dereferenced in the int handler.
- */
- vq_req_get(c2dev, vq_req);
-
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id && qp->state == IB_QPS_RTS) {
- pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
- "qp=%p, cm_id=%p\n",qp,qp->cm_id);
- /* Generate a CLOSE event */
- vq_req->qp = qp;
- vq_req->cm_id = qp->cm_id;
- vq_req->event = IW_CM_EVENT_CLOSE;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
-
- /*
- * Send WR to adapter
- */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- /*
- * Wait for reply from adapter
- */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- /*
- * Process reply
- */
- reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- spin_lock_irqsave(&qp->lock, flags);
- if (qp->cm_id) {
- qp->cm_id->rem_ref(qp->cm_id);
- qp->cm_id = NULL;
- }
- spin_unlock_irqrestore(&qp->lock, flags);
-
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
-{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&c2dev->qp_table.lock);
-
- ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
- if (ret >= 0)
- qp->qpn = ret;
-
- spin_unlock_irq(&c2dev->qp_table.lock);
- idr_preload_end();
- return ret < 0 ? ret : 0;
-}
-
-static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
-{
- spin_lock_irq(&c2dev->qp_table.lock);
- idr_remove(&c2dev->qp_table.idr, qpn);
- spin_unlock_irq(&c2dev->qp_table.lock);
-}
-
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
-{
- unsigned long flags;
- struct c2_qp *qp;
-
- spin_lock_irqsave(&c2dev->qp_table.lock, flags);
- qp = idr_find(&c2dev->qp_table.idr, qpn);
- spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
- return qp;
-}
-
-int c2_alloc_qp(struct c2_dev *c2dev,
- struct c2_pd *pd,
- struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
-{
- struct c2wr_qp_create_req wr;
- struct c2wr_qp_create_rep *reply;
- struct c2_vq_req *vq_req;
- struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
- struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
- unsigned long peer_pa;
- u32 q_size, msg_size, mmap_size;
- void __iomem *mmap;
- int err;
-
- err = c2_alloc_qpn(c2dev, qp);
- if (err)
- return err;
- qp->ibqp.qp_num = qp->qpn;
- qp->ibqp.qp_type = IB_QPT_RC;
-
- /* Allocate the SQ and RQ shared pointers */
- qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &qp->sq_mq.shared_dma, GFP_KERNEL);
- if (!qp->sq_mq.shared) {
- err = -ENOMEM;
- goto bail0;
- }
-
- qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &qp->rq_mq.shared_dma, GFP_KERNEL);
- if (!qp->rq_mq.shared) {
- err = -ENOMEM;
- goto bail1;
- }
-
- /* Allocate the verbs request */
- vq_req = vq_req_alloc(c2dev);
- if (vq_req == NULL) {
- err = -ENOMEM;
- goto bail2;
- }
-
- /* Initialize the work request */
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_QP_CREATE);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
- wr.sq_cq_handle = send_cq->adapter_handle;
- wr.rq_cq_handle = recv_cq->adapter_handle;
- wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
- wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
- wr.srq_handle = 0;
- wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
- QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
- wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
- wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
- wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
- wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
- wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
- wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
- wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
- wr.pd_id = pd->pd_id;
- wr.user_context = (unsigned long) qp;
-
- vq_req_get(c2dev, vq_req);
-
- /* Send the WR to the adapter */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail3;
- }
-
- /* Wait for the verb reply */
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail3;
- }
-
- /* Process the reply */
- reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail3;
- }
-
- if ((err = c2_wr_get_result(reply)) != 0) {
- goto bail4;
- }
-
- /* Fill in the kernel QP struct */
- atomic_set(&qp->refcount, 1);
- qp->adapter_handle = reply->qp_handle;
- qp->state = IB_QPS_RESET;
- qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
- qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
- qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
- init_waitqueue_head(&qp->wait);
-
- /* Initialize the SQ MQ */
- q_size = be32_to_cpu(reply->sq_depth);
- msg_size = be32_to_cpu(reply->sq_msg_size);
- peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
- mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
- mmap = ioremap_nocache(peer_pa, mmap_size);
- if (!mmap) {
- err = -ENOMEM;
- goto bail5;
- }
-
- c2_mq_req_init(&qp->sq_mq,
- be32_to_cpu(reply->sq_mq_index),
- q_size,
- msg_size,
- mmap + sizeof(struct c2_mq_shared), /* pool start */
- mmap, /* peer */
- C2_MQ_ADAPTER_TARGET);
-
- /* Initialize the RQ mq */
- q_size = be32_to_cpu(reply->rq_depth);
- msg_size = be32_to_cpu(reply->rq_msg_size);
- peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
- mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
- mmap = ioremap_nocache(peer_pa, mmap_size);
- if (!mmap) {
- err = -ENOMEM;
- goto bail6;
- }
-
- c2_mq_req_init(&qp->rq_mq,
- be32_to_cpu(reply->rq_mq_index),
- q_size,
- msg_size,
- mmap + sizeof(struct c2_mq_shared), /* pool start */
- mmap, /* peer */
- C2_MQ_ADAPTER_TARGET);
-
- vq_repbuf_free(c2dev, reply);
- vq_req_free(c2dev, vq_req);
-
- return 0;
-
-bail6:
- iounmap(qp->sq_mq.peer);
-bail5:
- destroy_qp(c2dev, qp);
-bail4:
- vq_repbuf_free(c2dev, reply);
-bail3:
- vq_req_free(c2dev, vq_req);
-bail2:
- c2_free_mqsp(qp->rq_mq.shared);
-bail1:
- c2_free_mqsp(qp->sq_mq.shared);
-bail0:
- c2_free_qpn(c2dev, qp->qpn);
- return err;
-}
-
-static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
- if (send_cq == recv_cq)
- spin_lock_irq(&send_cq->lock);
- else if (send_cq > recv_cq) {
- spin_lock_irq(&send_cq->lock);
- spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock_irq(&recv_cq->lock);
- spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
- }
-}
-
-static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
- if (send_cq == recv_cq)
- spin_unlock_irq(&send_cq->lock);
- else if (send_cq > recv_cq) {
- spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
- } else {
- spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
- }
-}
-
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
- struct c2_cq *send_cq;
- struct c2_cq *recv_cq;
-
- send_cq = to_c2cq(qp->ibqp.send_cq);
- recv_cq = to_c2cq(qp->ibqp.recv_cq);
-
- /*
- * Lock CQs here, so that CQ polling code can do QP lookup
- * without taking a lock.
- */
- c2_lock_cqs(send_cq, recv_cq);
- c2_free_qpn(c2dev, qp->qpn);
- c2_unlock_cqs(send_cq, recv_cq);
-
- /*
- * Destroy qp in the rnic...
- */
- destroy_qp(c2dev, qp);
-
- /*
- * Mark any unreaped CQEs as null and void.
- */
- c2_cq_clean(c2dev, qp, send_cq->cqn);
- if (send_cq != recv_cq)
- c2_cq_clean(c2dev, qp, recv_cq->cqn);
- /*
- * Unmap the MQs and return the shared pointers
- * to the message pool.
- */
- iounmap(qp->sq_mq.peer);
- iounmap(qp->rq_mq.peer);
- c2_free_mqsp(qp->sq_mq.shared);
- c2_free_mqsp(qp->rq_mq.shared);
-
- atomic_dec(&qp->refcount);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
-}
-
-/*
- * Function: move_sgl
- *
- * Description:
- * Move an SGL from the user's work request struct into a CCIL Work Request
- * message, swapping to WR byte order and ensuring the total length doesn't
- * overflow.
- *
- * IN:
- * dst - ptr to CCIL Work Request message SGL memory.
- * src - ptr to the consumer's SGL memory.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int
-move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
- u8 * actual_count)
-{
- u32 tot = 0; /* running total */
- u8 acount = 0; /* running total non-0 len sge's */
-
- while (count > 0) {
- /*
- * If the addition of this SGE causes the
- * total SGL length to exceed 2^32-1, then
- * fail-n-bail.
- *
- * If the current total plus the next element length
- * wraps, then it will go negative and be less than the
- * current total...
- */
- if ((tot + src->length) < tot) {
- return -EINVAL;
- }
- /*
- * Bug: 1456 (as well as 1498 & 1643)
- * Skip over any sge's supplied with len=0
- */
- if (src->length) {
- tot += src->length;
- dst->stag = cpu_to_be32(src->lkey);
- dst->to = cpu_to_be64(src->addr);
- dst->length = cpu_to_be32(src->length);
- dst++;
- acount++;
- }
- src++;
- count--;
- }
-
- if (acount == 0) {
- /*
- * Bug: 1476 (as well as 1498, 1456 and 1643)
- * Setup the SGL in the WR to make it easier for the RNIC.
- * This way, the FW doesn't have to deal with special cases.
- * Setting length=0 should be sufficient.
- */
- dst->stag = 0;
- dst->to = 0;
- dst->length = 0;
- }
-
- *p_len = tot;
- *actual_count = acount;
- return 0;
-}
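The length check in move_sgl() relies on unsigned wraparound: if adding the next SGE length to the running 32-bit total overflows, the wrapped sum comes out smaller than the total it started from. A minimal standalone sketch of that test, with a hypothetical helper name:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the wraparound test used by move_sgl(). */
static bool sgl_total_would_overflow(uint32_t tot, uint32_t next_len)
{
	/* If the 32-bit addition wraps, the result is smaller than tot. */
	return (uint32_t)(tot + next_len) < tot;
}

int main(void)
{
	assert(!sgl_total_would_overflow(100, 200));
	assert(sgl_total_would_overflow(0xFFFFFFF0u, 0x20u)); /* wraps to 0x10 */
	return 0;
}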
-
-/*
- * Function: c2_activity (private function)
- *
- * Description:
- * Post an mq index to the host->adapter activity fifo.
- *
- * IN:
- * c2dev - ptr to c2dev structure
- * mq_index - mq index to post
- * shared - value most recently written to shared
- *
- * OUT:
- *
- * Return:
- * none
- */
-static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
-{
- /*
- * First read the register to see if the FIFO is full, and if so,
- * spin until it's not. This isn't perfect -- there is no
- * synchronization among the clients of the register, but in
-	 * practice it prevents multiple CPUs from hammering the bus
- * with PCI RETRY. Note that when this does happen, the card
- * cannot get on the bus and the card and system hang in a
- * deadlock -- thus the need for this code. [TOT]
- */
- while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
- udelay(10);
-
- __raw_writel(C2_HINT_MAKE(mq_index, shared),
- c2dev->regs + PCI_BAR0_ADAPTER_HINT);
-}
-
-/*
- * Function: qp_wr_post
- *
- * Description:
- * This in-line function allocates an MQ msg, then moves the host-copy of
- * the completed WR into msg. Then it posts the message.
- *
- * IN:
- * q - ptr to user MQ.
- * wr - ptr to host-copy of the WR.
- * qp - ptr to user qp
- * size - Number of bytes to post. Assumed to be divisible by 4.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
-{
- union c2wr *msg;
-
- msg = c2_mq_alloc(q);
- if (msg == NULL) {
- return -EINVAL;
- }
-#ifdef CCMSGMAGIC
- ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-
- /*
- * Since all header fields in the WR are the same as the
- * CQE, set the following so the adapter need not.
- */
- c2_wr_set_result(wr, CCERR_PENDING);
-
- /*
- * Copy the wr down to the adapter
- */
- memcpy((void *) msg, (void *) wr, size);
-
- c2_mq_produce(q);
- return 0;
-}
-
-
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
- struct ib_send_wr **bad_wr)
-{
- struct c2_dev *c2dev = to_c2dev(ibqp->device);
- struct c2_qp *qp = to_c2qp(ibqp);
- union c2wr wr;
- unsigned long lock_flags;
- int err = 0;
-
- u32 flags;
- u32 tot_len;
- u8 actual_sge_count;
- u32 msg_size;
-
- if (qp->state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
-
- while (ib_wr) {
-
- flags = 0;
- wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
- if (ib_wr->send_flags & IB_SEND_SIGNALED) {
- flags |= SQ_SIGNALED;
- }
-
- switch (ib_wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (ib_wr->opcode == IB_WR_SEND) {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
- else
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
- wr.sqwr.send.remote_stag = 0;
- } else {
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
- else
- c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
- wr.sqwr.send.remote_stag =
- cpu_to_be32(ib_wr->ex.invalidate_rkey);
- }
-
- msg_size = sizeof(struct c2wr_send_req) +
- sizeof(struct c2_data_addr) * ib_wr->num_sge;
- if (ib_wr->num_sge > qp->send_sgl_depth) {
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- flags |= SQ_READ_FENCE;
- }
- err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
- ib_wr->sg_list,
- ib_wr->num_sge,
- &tot_len, &actual_sge_count);
- wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
- c2_wr_set_sge_count(&wr, actual_sge_count);
- break;
- case IB_WR_RDMA_WRITE:
- c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
- msg_size = sizeof(struct c2wr_rdma_write_req) +
- (sizeof(struct c2_data_addr) * ib_wr->num_sge);
- if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- flags |= SQ_READ_FENCE;
- }
- wr.sqwr.rdma_write.remote_stag =
- cpu_to_be32(rdma_wr(ib_wr)->rkey);
- wr.sqwr.rdma_write.remote_to =
- cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
- err = move_sgl((struct c2_data_addr *)
- & (wr.sqwr.rdma_write.data),
- ib_wr->sg_list,
- ib_wr->num_sge,
- &tot_len, &actual_sge_count);
- wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
- c2_wr_set_sge_count(&wr, actual_sge_count);
- break;
- case IB_WR_RDMA_READ:
- c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
- msg_size = sizeof(struct c2wr_rdma_read_req);
-
-		/* iWARP only supports 1 sge for RDMA reads */
- if (ib_wr->num_sge > 1) {
- err = -EINVAL;
- break;
- }
-
- /*
- * Move the local and remote stag/to/len into the WR.
- */
- wr.sqwr.rdma_read.local_stag =
- cpu_to_be32(ib_wr->sg_list->lkey);
- wr.sqwr.rdma_read.local_to =
- cpu_to_be64(ib_wr->sg_list->addr);
- wr.sqwr.rdma_read.remote_stag =
- cpu_to_be32(rdma_wr(ib_wr)->rkey);
- wr.sqwr.rdma_read.remote_to =
- cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
- wr.sqwr.rdma_read.length =
- cpu_to_be32(ib_wr->sg_list->length);
- break;
- default:
- /* error */
- msg_size = 0;
- err = -EINVAL;
- break;
- }
-
- /*
- * If we had an error on the last wr build, then
- * break out. Possible errors include bogus WR
- * type, and a bogus SGL length...
- */
- if (err) {
- break;
- }
-
- /*
- * Store flags
- */
- c2_wr_set_flags(&wr, flags);
-
- /*
- * Post the puppy!
- */
- spin_lock_irqsave(&qp->lock, lock_flags);
- err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
- if (err) {
- spin_unlock_irqrestore(&qp->lock, lock_flags);
- break;
- }
-
- /*
- * Enqueue mq index to activity FIFO.
- */
- c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
- spin_unlock_irqrestore(&qp->lock, lock_flags);
-
- ib_wr = ib_wr->next;
- }
-
-out:
- if (err)
- *bad_wr = ib_wr;
- return err;
-}
-
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
- struct ib_recv_wr **bad_wr)
-{
- struct c2_dev *c2dev = to_c2dev(ibqp->device);
- struct c2_qp *qp = to_c2qp(ibqp);
- union c2wr wr;
- unsigned long lock_flags;
- int err = 0;
-
- if (qp->state > IB_QPS_RTS) {
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Try and post each work request
- */
- while (ib_wr) {
- u32 tot_len;
- u8 actual_sge_count;
-
- if (ib_wr->num_sge > qp->recv_sgl_depth) {
- err = -EINVAL;
- break;
- }
-
- /*
- * Create local host-copy of the WR
- */
- wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
- c2_wr_set_id(&wr, CCWR_RECV);
- c2_wr_set_flags(&wr, 0);
-
- /* sge_count is limited to eight bits. */
- BUG_ON(ib_wr->num_sge >= 256);
- err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
- ib_wr->sg_list,
- ib_wr->num_sge, &tot_len, &actual_sge_count);
- c2_wr_set_sge_count(&wr, actual_sge_count);
-
- /*
- * If we had an error on the last wr build, then
- * break out. Possible errors include bogus WR
- * type, and a bogus SGL length...
- */
- if (err) {
- break;
- }
-
- spin_lock_irqsave(&qp->lock, lock_flags);
- err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
- if (err) {
- spin_unlock_irqrestore(&qp->lock, lock_flags);
- break;
- }
-
- /*
- * Enqueue mq index to activity FIFO
- */
- c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
- spin_unlock_irqrestore(&qp->lock, lock_flags);
-
- ib_wr = ib_wr->next;
- }
-
-out:
- if (err)
- *bad_wr = ib_wr;
- return err;
-}
-
-void c2_init_qp_table(struct c2_dev *c2dev)
-{
- spin_lock_init(&c2dev->qp_table.lock);
- idr_init(&c2dev->qp_table.idr);
-}
-
-void c2_cleanup_qp_table(struct c2_dev *c2dev)
-{
- idr_destroy(&c2dev->qp_table.idr);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
deleted file mode 100644
index 5e65c6d07ca4..000000000000
--- a/drivers/staging/rdma/amso1100/c2_rnic.c
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/inet.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <linux/route.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_vq.h"
-
-/* Device capabilities */
-#define C2_MIN_PAGESIZE 1024
-
-#define C2_MAX_MRS 32768
-#define C2_MAX_QPS 16000
-#define C2_MAX_WQE_SZ 256
-#define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ)
-#define C2_MAX_SGES 4
-#define C2_MAX_SGE_RD 1
-#define C2_MAX_CQS 32768
-#define C2_MAX_CQES 4096
-#define C2_MAX_PDS 16384
-
-/*
- * Send the adapter INIT message to the amso1100
- */
-static int c2_adapter_init(struct c2_dev *c2dev)
-{
- struct c2wr_init_req wr;
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_INIT);
- wr.hdr.context = 0;
- wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
- wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
- wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
- wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
- wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
- wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
-
- /* Post the init message */
- return vq_send_wr(c2dev, (union c2wr *) & wr);
-}
-
-/*
- * Send the adapter TERM message to the amso1100
- */
-static void c2_adapter_term(struct c2_dev *c2dev)
-{
- struct c2wr_init_req wr;
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_TERM);
- wr.hdr.context = 0;
-
-	/* Post the term message */
- vq_send_wr(c2dev, (union c2wr *) & wr);
- c2dev->init = 0;
-
- return;
-}
-
-/*
- * Query the adapter
- */
-static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_rnic_query_req wr;
- struct c2wr_rnic_query_rep *reply;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
- wr.hdr.context = (unsigned long) vq_req;
- wr.rnic_handle = c2dev->adapter_handle;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) &wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply =
- (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply)
- err = -ENOMEM;
- else
- err = c2_errno(reply);
- if (err)
- goto bail2;
-
- props->fw_ver =
- ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
- ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
- (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
- memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
- props->max_mr_size = 0xFFFFFFFF;
- props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
- props->vendor_id = be32_to_cpu(reply->vendor_id);
- props->vendor_part_id = be32_to_cpu(reply->part_number);
- props->hw_ver = be32_to_cpu(reply->hw_version);
- props->max_qp = be32_to_cpu(reply->max_qps);
- props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
- props->device_cap_flags = c2dev->device_cap_flags;
- props->max_sge = C2_MAX_SGES;
- props->max_sge_rd = C2_MAX_SGE_RD;
- props->max_cq = be32_to_cpu(reply->max_cqs);
- props->max_cqe = be32_to_cpu(reply->max_cq_depth);
- props->max_mr = be32_to_cpu(reply->max_mrs);
- props->max_pd = be32_to_cpu(reply->max_pds);
- props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird);
- props->max_ee_rd_atom = 0;
- props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird);
- props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
- props->max_ee_init_rd_atom = 0;
- props->atomic_cap = IB_ATOMIC_NONE;
- props->max_ee = 0;
- props->max_rdd = 0;
- props->max_mw = be32_to_cpu(reply->max_mws);
- props->max_raw_ipv6_qp = 0;
- props->max_raw_ethy_qp = 0;
- props->max_mcast_grp = 0;
- props->max_mcast_qp_attach = 0;
- props->max_total_mcast_qp_attach = 0;
- props->max_ah = 0;
- props->max_fmr = 0;
- props->max_map_per_fmr = 0;
- props->max_srq = 0;
- props->max_srq_wr = 0;
- props->max_srq_sge = 0;
- props->max_pkeys = 0;
- props->local_ca_ack_delay = 0;
-
- bail2:
- vq_repbuf_free(c2dev, reply);
-
- bail1:
- vq_req_free(c2dev, vq_req);
- return err;
-}
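c2_rnic_query() packs the reported firmware version into the single 64-bit fw_ver attribute: major in the upper 32 bits, minor in bits 16-31 and patch in bits 0-15. A small illustrative sketch of the same packing, using hypothetical version numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t major = 1, minor = 2, patch = 3;	/* hypothetical values */
	uint64_t fw_ver = ((uint64_t)major << 32) |
			  ((uint64_t)(minor & 0xFFFF) << 16) |
			  (patch & 0xFFFF);

	/* For 1.2.3 this prints fw_ver = 0x0000000100020003 */
	printf("fw_ver = 0x%016llx\n", (unsigned long long)fw_ver);
	return 0;
}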
-
-/*
- * Add an IP address to the RNIC interface
- */
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_rnic_setconfig_req *wr;
- struct c2wr_rnic_setconfig_rep *reply;
- struct c2_netaddr netaddr;
- int err, len;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- len = sizeof(struct c2_netaddr);
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
-
- netaddr.ip_addr = inaddr;
- netaddr.netmask = inmask;
- netaddr.mtu = 0;
-
- memcpy(wr->data, &netaddr, len);
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply =
- (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-
-bail1:
- kfree(wr);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Delete an IP address from the RNIC interface
- */
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
- struct c2_vq_req *vq_req;
- struct c2wr_rnic_setconfig_req *wr;
- struct c2wr_rnic_setconfig_rep *reply;
- struct c2_netaddr netaddr;
- int err, len;
-
- vq_req = vq_req_alloc(c2dev);
- if (!vq_req)
- return -ENOMEM;
-
- len = sizeof(struct c2_netaddr);
- wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
- if (!wr) {
- err = -ENOMEM;
- goto bail0;
- }
-
- c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
- wr->hdr.context = (unsigned long) vq_req;
- wr->rnic_handle = c2dev->adapter_handle;
- wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
-
- netaddr.ip_addr = inaddr;
- netaddr.netmask = inmask;
- netaddr.mtu = 0;
-
- memcpy(wr->data, &netaddr, len);
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, (union c2wr *) wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail1;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err)
- goto bail1;
-
- reply =
- (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail1;
- }
-
- err = c2_errno(reply);
- vq_repbuf_free(c2dev, reply);
-
-bail1:
- kfree(wr);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Open a single RNIC instance to use with all
- * low level openib calls
- */
-static int c2_rnic_open(struct c2_dev *c2dev)
-{
- struct c2_vq_req *vq_req;
- union c2wr wr;
- struct c2wr_rnic_open_rep *reply;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (vq_req == NULL) {
- return -ENOMEM;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
- wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
- wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
- wr.rnic_open.req.port_num = cpu_to_be16(0);
- wr.rnic_open.req.user_context = (unsigned long) c2dev;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, &wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- if ((err = c2_errno(reply)) != 0) {
- goto bail1;
- }
-
- c2dev->adapter_handle = reply->rnic_handle;
-
-bail1:
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Close the RNIC instance
- */
-static int c2_rnic_close(struct c2_dev *c2dev)
-{
- struct c2_vq_req *vq_req;
- union c2wr wr;
- struct c2wr_rnic_close_rep *reply;
- int err;
-
- vq_req = vq_req_alloc(c2dev);
- if (vq_req == NULL) {
- return -ENOMEM;
- }
-
- memset(&wr, 0, sizeof(wr));
- c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
- wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
- wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
-
- vq_req_get(c2dev, vq_req);
-
- err = vq_send_wr(c2dev, &wr);
- if (err) {
- vq_req_put(c2dev, vq_req);
- goto bail0;
- }
-
- err = vq_wait_for_reply(c2dev, vq_req);
- if (err) {
- goto bail0;
- }
-
- reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
- if (!reply) {
- err = -ENOMEM;
- goto bail0;
- }
-
- if ((err = c2_errno(reply)) != 0) {
- goto bail1;
- }
-
- c2dev->adapter_handle = 0;
-
-bail1:
- vq_repbuf_free(c2dev, reply);
-bail0:
- vq_req_free(c2dev, vq_req);
- return err;
-}
-
-/*
- * Called by c2_probe to initialize the RNIC. This principally
- * involves initializing the various limits and resource pools that
- * comprise the RNIC instance.
- */
-int c2_rnic_init(struct c2_dev *c2dev)
-{
- int err;
- u32 qsize, msgsize;
- void *q1_pages;
- void *q2_pages;
- void __iomem *mmio_regs;
-
- /* Device capabilities */
- c2dev->device_cap_flags =
- (IB_DEVICE_RESIZE_MAX_WR |
- IB_DEVICE_CURR_QP_STATE_MOD |
- IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW);
-
- /* Allocate the qptr_array */
- c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
- if (!c2dev->qptr_array) {
- return -ENOMEM;
- }
-
- /* Initialize the qptr_array */
- c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
- c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
- c2dev->qptr_array[2] = (void *) &c2dev->aeq;
-
- /* Initialize data structures */
- init_waitqueue_head(&c2dev->req_vq_wo);
- spin_lock_init(&c2dev->vqlock);
- spin_lock_init(&c2dev->lock);
-
- /* Allocate MQ shared pointer pool for kernel clients. User
- * mode client pools are hung off the user context
- */
- err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
- if (err) {
- goto bail0;
- }
-
- /* Allocate shared pointers for Q0, Q1, and Q2 from
- * the shared pointer pool.
- */
-
- c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->hint_count_dma,
- GFP_KERNEL);
- c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->req_vq.shared_dma,
- GFP_KERNEL);
- c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->rep_vq.shared_dma,
- GFP_KERNEL);
- c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
- &c2dev->aeq.shared_dma, GFP_KERNEL);
- if (!c2dev->hint_count || !c2dev->req_vq.shared ||
- !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
- err = -ENOMEM;
- goto bail1;
- }
-
- mmio_regs = c2dev->kva;
- /* Initialize the Verbs Request Queue */
- c2_mq_req_init(&c2dev->req_vq, 0,
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
- C2_MQ_ADAPTER_TARGET);
-
- /* Initialize the Verbs Reply Queue */
- qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
- msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
- q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
- &c2dev->rep_vq.host_dma, GFP_KERNEL);
- if (!q1_pages) {
- err = -ENOMEM;
- goto bail1;
- }
- dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
- pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
- (unsigned long long) c2dev->rep_vq.host_dma);
- c2_mq_rep_init(&c2dev->rep_vq,
- 1,
- qsize,
- msgsize,
- q1_pages,
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
- C2_MQ_HOST_TARGET);
-
-	/* Initialize the Asynchronous Event Queue */
- qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
- msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
- q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
- &c2dev->aeq.host_dma, GFP_KERNEL);
- if (!q2_pages) {
- err = -ENOMEM;
- goto bail2;
- }
- dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
- pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
- (unsigned long long) c2dev->aeq.host_dma);
- c2_mq_rep_init(&c2dev->aeq,
- 2,
- qsize,
- msgsize,
- q2_pages,
- mmio_regs +
- be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
- C2_MQ_HOST_TARGET);
-
- /* Initialize the verbs request allocator */
- err = vq_init(c2dev);
- if (err)
- goto bail3;
-
- /* Enable interrupts on the adapter */
- writel(0, c2dev->regs + C2_IDIS);
-
- /* create the WR init message */
- err = c2_adapter_init(c2dev);
- if (err)
- goto bail4;
- c2dev->init++;
-
- /* open an adapter instance */
- err = c2_rnic_open(c2dev);
- if (err)
- goto bail4;
-
-	/* Initialize the cached adapter limits */
- err = c2_rnic_query(c2dev, &c2dev->props);
- if (err)
- goto bail5;
-
- /* Initialize the PD pool */
- err = c2_init_pd_table(c2dev);
- if (err)
- goto bail5;
-
- /* Initialize the QP pool */
- c2_init_qp_table(c2dev);
- return 0;
-
-bail5:
- c2_rnic_close(c2dev);
-bail4:
- vq_term(c2dev);
-bail3:
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
- q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
-bail2:
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
-bail1:
- c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-bail0:
- vfree(c2dev->qptr_array);
-
- return err;
-}
-
-/*
- * Called by c2_remove to cleanup the RNIC resources.
- */
-void c2_rnic_term(struct c2_dev *c2dev)
-{
-
- /* Close the open adapter instance */
- c2_rnic_close(c2dev);
-
- /* Send the TERM message to the adapter */
- c2_adapter_term(c2dev);
-
- /* Disable interrupts on the adapter */
- writel(1, c2dev->regs + C2_IDIS);
-
- /* Free the QP pool */
- c2_cleanup_qp_table(c2dev);
-
- /* Free the PD pool */
- c2_cleanup_pd_table(c2dev);
-
- /* Free the verbs request allocator */
- vq_term(c2dev);
-
-	/* Free the asynchronous event queue */
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
- c2dev->aeq.msg_pool.host,
- dma_unmap_addr(&c2dev->aeq, mapping));
-
- /* Free the verbs reply queue */
- dma_free_coherent(&c2dev->pcidev->dev,
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- c2dev->rep_vq.msg_pool.host,
- dma_unmap_addr(&c2dev->rep_vq, mapping));
-
- /* Free the MQ shared pointer pool */
- c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-
- /* Free the qptr_array */
- vfree(c2dev->qptr_array);
-
- return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
deleted file mode 100644
index 6ee4aa92d875..000000000000
--- a/drivers/staging/rdma/amso1100/c2_status.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_STATUS_H_
-#define _C2_STATUS_H_
-
-/*
- * Verbs Status Codes
- */
-enum c2_status {
- C2_OK = 0, /* This must be zero */
- CCERR_INSUFFICIENT_RESOURCES = 1,
- CCERR_INVALID_MODIFIER = 2,
- CCERR_INVALID_MODE = 3,
- CCERR_IN_USE = 4,
- CCERR_INVALID_RNIC = 5,
- CCERR_INTERRUPTED_OPERATION = 6,
- CCERR_INVALID_EH = 7,
- CCERR_INVALID_CQ = 8,
- CCERR_CQ_EMPTY = 9,
- CCERR_NOT_IMPLEMENTED = 10,
- CCERR_CQ_DEPTH_TOO_SMALL = 11,
- CCERR_PD_IN_USE = 12,
- CCERR_INVALID_PD = 13,
- CCERR_INVALID_SRQ = 14,
- CCERR_INVALID_ADDRESS = 15,
- CCERR_INVALID_NETMASK = 16,
- CCERR_INVALID_QP = 17,
- CCERR_INVALID_QP_STATE = 18,
- CCERR_TOO_MANY_WRS_POSTED = 19,
- CCERR_INVALID_WR_TYPE = 20,
- CCERR_INVALID_SGL_LENGTH = 21,
- CCERR_INVALID_SQ_DEPTH = 22,
- CCERR_INVALID_RQ_DEPTH = 23,
- CCERR_INVALID_ORD = 24,
- CCERR_INVALID_IRD = 25,
- CCERR_QP_ATTR_CANNOT_CHANGE = 26,
- CCERR_INVALID_STAG = 27,
- CCERR_QP_IN_USE = 28,
- CCERR_OUTSTANDING_WRS = 29,
- CCERR_STAG_IN_USE = 30,
- CCERR_INVALID_STAG_INDEX = 31,
- CCERR_INVALID_SGL_FORMAT = 32,
- CCERR_ADAPTER_TIMEOUT = 33,
- CCERR_INVALID_CQ_DEPTH = 34,
- CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
- CCERR_INVALID_EP = 36,
- CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
- CCERR_FLUSHED = 38,
- CCERR_INVALID_WQE = 39,
- CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
- CCERR_REMOTE_TERMINATION_ERROR = 41,
- CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
- CCERR_ACCESS_VIOLATION = 43,
- CCERR_INVALID_PD_ID = 44,
- CCERR_WRAP_ERROR = 45,
- CCERR_INV_STAG_ACCESS_ERROR = 46,
- CCERR_ZERO_RDMA_READ_RESOURCES = 47,
- CCERR_QP_NOT_PRIVILEGED = 48,
- CCERR_STAG_STATE_NOT_INVALID = 49,
- CCERR_INVALID_PAGE_SIZE = 50,
- CCERR_INVALID_BUFFER_SIZE = 51,
- CCERR_INVALID_PBE = 52,
- CCERR_INVALID_FBO = 53,
- CCERR_INVALID_LENGTH = 54,
- CCERR_INVALID_ACCESS_RIGHTS = 55,
- CCERR_PBL_TOO_BIG = 56,
- CCERR_INVALID_VA = 57,
- CCERR_INVALID_REGION = 58,
- CCERR_INVALID_WINDOW = 59,
- CCERR_TOTAL_LENGTH_TOO_BIG = 60,
- CCERR_INVALID_QP_ID = 61,
- CCERR_ADDR_IN_USE = 62,
- CCERR_ADDR_NOT_AVAIL = 63,
- CCERR_NET_DOWN = 64,
- CCERR_NET_UNREACHABLE = 65,
- CCERR_CONN_ABORTED = 66,
- CCERR_CONN_RESET = 67,
- CCERR_NO_BUFS = 68,
- CCERR_CONN_TIMEDOUT = 69,
- CCERR_CONN_REFUSED = 70,
- CCERR_HOST_UNREACHABLE = 71,
- CCERR_INVALID_SEND_SGL_DEPTH = 72,
- CCERR_INVALID_RECV_SGL_DEPTH = 73,
- CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
- CCERR_INSUFFICIENT_PRIVILEGES = 75,
- CCERR_STACK_ERROR = 76,
- CCERR_INVALID_VERSION = 77,
- CCERR_INVALID_MTU = 78,
- CCERR_INVALID_IMAGE = 79,
-	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
- CCERR_DEFER = 99, /* not an error; used internally by adapter */
- CCERR_FAILED_WRITE = 100,
- CCERR_FAILED_ERASE = 101,
- CCERR_FAILED_VERIFICATION = 102,
- CCERR_NOT_FOUND = 103,
-
-};
-
-/*
- * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
- */
-enum c2_connect_status {
- C2_CONN_STATUS_SUCCESS = C2_OK,
- C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
- C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
- C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
- C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
- C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
- C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
- C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
- C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
- C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
- C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
-};
-
-/*
- * Flash programming status codes.
- */
-enum c2_flash_status {
- C2_FLASH_STATUS_SUCCESS = 0x0000,
- C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
- C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
- C2_FLASH_STATUS_ECLBS = 0x0400,
- C2_FLASH_STATUS_PSLBS = 0x0800,
- C2_FLASH_STATUS_VPENS = 0x1000,
-};
-
-#endif /* _C2_STATUS_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
deleted file mode 100644
index 7e9e7ad65467..000000000000
--- a/drivers/staging/rdma/amso1100/c2_user.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_USER_H
-#define C2_USER_H
-
-#include <linux/types.h>
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct c2_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct c2_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct c2_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct c2_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct c2_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* C2_USER_H */
diff --git a/drivers/staging/rdma/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
deleted file mode 100644
index 2ec716fb2edb..000000000000
--- a/drivers/staging/rdma/amso1100/c2_vq.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include "c2_vq.h"
-#include "c2_provider.h"
-
-/*
- * Verbs Request Objects:
- *
- * VQ Request Objects are allocated by the kernel verbs handlers.
- * They contain a wait object, a refcnt, an atomic bool indicating that the
- * adapter has replied, and a copy of the verb reply work request.
- * A pointer to the VQ Request Object is passed down in the context
- * field of the work request message, and reflected back by the adapter
- * in the verbs reply message. The function handle_vq() in the interrupt
- * path will use this pointer to:
- * 1) append a copy of the verbs reply message
- * 2) mark that the reply is ready
- * 3) wake up the kernel verbs handler blocked awaiting the reply.
- *
- *
- * The kernel verbs handlers do a "get" to put a 2nd reference on the
- * VQ Request object. If the kernel verbs handler exits before the adapter
- * can respond, this extra reference will keep the VQ Request object around
- * until the adapter's reply can be processed. The reason we need this is
- * because a pointer to this object is stuffed into the context field of
- * the verbs work request message, and reflected back in the reply message.
- * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
- * kernel verb handler that is blocked awaiting the verb reply.
- * So handle_vq() will do a "put" on the object when it's done accessing it.
- * NOTE: If we guarantee that the kernel verb handler will never bail before
- * getting the reply, then we don't need these refcnts.
- *
- *
- * VQ Request objects are freed by the kernel verbs handlers only
- * after the verb has been processed, or when the adapter fails and
- * does not reply.
- *
- *
- * Verbs Reply Buffers:
- *
- * VQ Reply bufs are local host memory copies of an
- * outstanding Verb Request reply
- * message. They are always allocated by the kernel verbs handlers, and _may_ be
- * freed by either the kernel verbs handler -or- the interrupt handler. The
- * kernel verbs handler _must_ free the repbuf, then free the vq request object
- * in that order.
- */
-
-int vq_init(struct c2_dev *c2dev)
-{
- sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
- (char) ('0' + c2dev->devnum));
- c2dev->host_msg_cache =
- kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (c2dev->host_msg_cache == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
-
-void vq_term(struct c2_dev *c2dev)
-{
- kmem_cache_destroy(c2dev->host_msg_cache);
-}
-
-/* vq_req_alloc - allocate a VQ Request Object and initialize it.
- * The refcnt is set to 1.
- */
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
-{
- struct c2_vq_req *r;
-
- r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
- if (r) {
- init_waitqueue_head(&r->wait_object);
- r->reply_msg = 0;
- r->event = 0;
- r->cm_id = NULL;
- r->qp = NULL;
- atomic_set(&r->refcnt, 1);
- atomic_set(&r->reply_ready, 0);
- }
- return r;
-}
-
-
-/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler
- * has already freed the VQ Reply Buffer if it existed.
- */
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
- r->reply_msg = 0;
- if (atomic_dec_and_test(&r->refcnt)) {
- kfree(r);
- }
-}
-
-/* vq_req_get - reference a VQ Request Object. Done
- * only in the kernel verbs handlers.
- */
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
- atomic_inc(&r->refcnt);
-}
-
-
-/* vq_req_put - dereference and potentially free a VQ Request Object.
- *
- * This is only called by handle_vq() in the
- * interrupt path when it is done processing
- * a verb reply message. If the associated
- * kernel verbs handler has already bailed,
- * then this put will actually free the VQ
- * Request object _and_ the VQ Reply Buffer
- * if it exists.
- */
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
- if (atomic_dec_and_test(&r->refcnt)) {
- if (r->reply_msg != 0)
- vq_repbuf_free(c2dev,
- (void *) (unsigned long) r->reply_msg);
- kfree(r);
- }
-}
-
-
-/*
- * vq_repbuf_alloc - allocate a VQ Reply Buffer.
- */
-void *vq_repbuf_alloc(struct c2_dev *c2dev)
-{
- return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
-}
-
-/*
- * vq_send_wr - post a verbs request message to the Verbs Request Queue.
- * If a message is not available in the MQ, then block until one is available.
- * NOTE: handle_mq() in interrupt context will wake up threads blocked here.
- * When the adapter drains the Verbs Request Queue,
- * it inserts MQ index 0 into the
- * adapter->host activity fifo and interrupts the host.
- */
-int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
-{
- void *msg;
- wait_queue_t __wait;
-
- /*
- * grab adapter vq lock
- */
- spin_lock(&c2dev->vqlock);
-
- /*
- * allocate msg
- */
- msg = c2_mq_alloc(&c2dev->req_vq);
-
- /*
-	 * If we cannot get a msg, then we'll wait.
-	 * When a message is available, the interrupt handler will wake_up()
-	 * any waiters.
- */
- while (msg == NULL) {
- pr_debug("%s:%d no available msg in VQ, waiting...\n",
- __func__, __LINE__);
- init_waitqueue_entry(&__wait, current);
- add_wait_queue(&c2dev->req_vq_wo, &__wait);
- spin_unlock(&c2dev->vqlock);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!c2_mq_full(&c2dev->req_vq)) {
- break;
- }
- if (!signal_pending(current)) {
- schedule_timeout(1 * HZ); /* 1 second... */
- continue;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&c2dev->req_vq_wo, &__wait);
- return -EINTR;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&c2dev->req_vq_wo, &__wait);
- spin_lock(&c2dev->vqlock);
- msg = c2_mq_alloc(&c2dev->req_vq);
- }
-
- /*
- * copy wr into adapter msg
- */
- memcpy(msg, wr, c2dev->req_vq.msg_size);
-
- /*
- * post msg
- */
- c2_mq_produce(&c2dev->req_vq);
-
- /*
- * release adapter vq lock
- */
- spin_unlock(&c2dev->vqlock);
- return 0;
-}
-
-
-/*
- * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
- */
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
-{
- if (!wait_event_timeout(req->wait_object,
- atomic_read(&req->reply_ready),
- 60*HZ))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-/*
- * vq_repbuf_free - Free a Verbs Reply Buffer.
- */
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
-{
- kmem_cache_free(c2dev->host_msg_cache, reply);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
deleted file mode 100644
index c1f6cef60213..000000000000
--- a/drivers/staging/rdma/amso1100/c2_vq.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_VQ_H_
-#define _C2_VQ_H_
-#include <linux/sched.h>
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_provider.h"
-
-struct c2_vq_req {
- u64 reply_msg; /* ptr to reply msg */
- wait_queue_head_t wait_object; /* wait object for vq reqs */
- atomic_t reply_ready; /* set when reply is ready */
- atomic_t refcnt; /* used to cancel WRs... */
- int event;
- struct iw_cm_id *cm_id;
- struct c2_qp *qp;
-};
-
-int vq_init(struct c2_dev *c2dev);
-void vq_term(struct c2_dev *c2dev);
-
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
-int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
-
-void *vq_repbuf_alloc(struct c2_dev *c2dev);
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
-
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
-#endif /* _C2_VQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
deleted file mode 100644
index 8d4b4ca463ca..000000000000
--- a/drivers/staging/rdma/amso1100/c2_wr.h
+++ /dev/null
@@ -1,1520 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_WR_H_
-#define _C2_WR_H_
-
-#ifdef CCDEBUG
-#define CCWR_MAGIC 0xb07700b0
-#endif
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-/* Maximum allowed size in bytes of private_data exchange
- * on connect.
- */
-#define C2_MAX_PRIVATE_DATA_SIZE 200
-
-/*
- * These types are shared among the adapter, host, and CCIL consumer.
- */
-enum c2_cq_notification_type {
- C2_CQ_NOTIFICATION_TYPE_NONE = 1,
- C2_CQ_NOTIFICATION_TYPE_NEXT,
- C2_CQ_NOTIFICATION_TYPE_NEXT_SE
-};
-
-enum c2_setconfig_cmd {
- C2_CFG_ADD_ADDR = 1,
- C2_CFG_DEL_ADDR = 2,
- C2_CFG_ADD_ROUTE = 3,
- C2_CFG_DEL_ROUTE = 4
-};
-
-enum c2_getconfig_cmd {
- C2_GETCONFIG_ROUTES = 1,
- C2_GETCONFIG_ADDRS
-};
-
-/*
- * CCIL Work Request Identifiers
- */
-enum c2wr_ids {
- CCWR_RNIC_OPEN = 1,
- CCWR_RNIC_QUERY,
- CCWR_RNIC_SETCONFIG,
- CCWR_RNIC_GETCONFIG,
- CCWR_RNIC_CLOSE,
- CCWR_CQ_CREATE,
- CCWR_CQ_QUERY,
- CCWR_CQ_MODIFY,
- CCWR_CQ_DESTROY,
- CCWR_QP_CONNECT,
- CCWR_PD_ALLOC,
- CCWR_PD_DEALLOC,
- CCWR_SRQ_CREATE,
- CCWR_SRQ_QUERY,
- CCWR_SRQ_MODIFY,
- CCWR_SRQ_DESTROY,
- CCWR_QP_CREATE,
- CCWR_QP_QUERY,
- CCWR_QP_MODIFY,
- CCWR_QP_DESTROY,
- CCWR_NSMR_STAG_ALLOC,
- CCWR_NSMR_REGISTER,
- CCWR_NSMR_PBL,
- CCWR_STAG_DEALLOC,
- CCWR_NSMR_REREGISTER,
- CCWR_SMR_REGISTER,
- CCWR_MR_QUERY,
- CCWR_MW_ALLOC,
- CCWR_MW_QUERY,
- CCWR_EP_CREATE,
- CCWR_EP_GETOPT,
- CCWR_EP_SETOPT,
- CCWR_EP_DESTROY,
- CCWR_EP_BIND,
- CCWR_EP_CONNECT,
- CCWR_EP_LISTEN,
- CCWR_EP_SHUTDOWN,
- CCWR_EP_LISTEN_CREATE,
- CCWR_EP_LISTEN_DESTROY,
- CCWR_EP_QUERY,
- CCWR_CR_ACCEPT,
- CCWR_CR_REJECT,
- CCWR_CONSOLE,
- CCWR_TERM,
- CCWR_FLASH_INIT,
- CCWR_FLASH,
- CCWR_BUF_ALLOC,
- CCWR_BUF_FREE,
- CCWR_FLASH_WRITE,
- CCWR_INIT, /* WARNING: Don't move this ever again! */
-
-
-
- /* Add new IDs here */
-
-
-
- /*
- * WARNING: CCWR_LAST must always be the last verbs id defined!
- * All the preceding IDs are fixed, and must not change.
- * You can add new IDs, but must not remove or reorder
- * any IDs. If you do, YOU will ruin any hope of
- * compatibility between versions.
- */
- CCWR_LAST,
-
- /*
- * Start over at 1 so that arrays indexed by user wr id's
- * begin at 1. This is OK since the verbs and user wr id's
- * are always used on disjoint sets of queues.
- */
- /*
- * The order of the CCWR_SEND_XX verbs must
- * match the order of the RDMA_OPs
- */
- CCWR_SEND = 1,
- CCWR_SEND_INV,
- CCWR_SEND_SE,
- CCWR_SEND_SE_INV,
- CCWR_RDMA_WRITE,
- CCWR_RDMA_READ,
- CCWR_RDMA_READ_INV,
- CCWR_MW_BIND,
- CCWR_NSMR_FASTREG,
- CCWR_STAG_INVALIDATE,
- CCWR_RECV,
- CCWR_NOP,
- CCWR_UNIMPL,
-/* WARNING: This must always be the last user wr id defined! */
-};
-#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
-
-/*
- * SQ/RQ Work Request Types
- */
-enum c2_wr_type {
- C2_WR_TYPE_SEND = CCWR_SEND,
- C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
- C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
- C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
- C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
- C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
- C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
- C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
- C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
- C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
- C2_WR_TYPE_RECV = CCWR_RECV,
- C2_WR_TYPE_NOP = CCWR_NOP,
-};
-
-struct c2_netaddr {
- __be32 ip_addr;
- __be32 netmask;
- u32 mtu;
-};
-
-struct c2_route {
- u32 ip_addr; /* 0 indicates the default route */
- u32 netmask; /* netmask associated with dst */
- u32 flags;
- union {
- u32 ipaddr; /* address of the nexthop interface */
- u8 enaddr[6];
- } nexthop;
-};
-
-/*
- * A Scatter Gather Entry.
- */
-struct c2_data_addr {
- __be32 stag;
- __be32 length;
- __be64 to;
-};
-
-/*
- * MR and MW flags used by the consumer, RI, and RNIC.
- */
-enum c2_mm_flags {
- MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */
- MEM_VA_BASED = 0x0002, /* Not Zero-based */
- MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */
- MEM_LOCAL_READ = 0x0008, /* allow local reads */
- MEM_LOCAL_WRITE = 0x0010, /* allow local writes */
- MEM_REMOTE_READ = 0x0020, /* allow remote reads */
- MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */
- MEM_WINDOW_BIND = 0x0080, /* binds allowed */
- MEM_SHARED = 0x0100, /* set if MR is shared */
- MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
-};
-
-/*
- * CCIL API ACF flags defined in terms of the low level mem flags.
- * This minimizes translation needed in the user API
- */
-enum c2_acf {
- C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
- C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
- C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
- C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
- C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
-};
-
-/*
- * Image types of objects written to flash
- */
-#define C2_FLASH_IMG_BITFILE 1
-#define C2_FLASH_IMG_OPTION_ROM 2
-#define C2_FLASH_IMG_VPD 3
-
-/*
- * To fix bug 1815 we define the maximum allowable size of the
- * terminate message (per the IETF spec; refer to the IETF
- * protocol specification, section 12.1.6, page 64).
- * The message is prefixed by 20 bytes of DDP info.
- *
- * Then the message has 6 bytes for the terminate control
- * and DDP segment length info plus a DDP header (either
- * 14 or 18 bytes) plus 28 bytes for the RDMA header.
- * Thus the max size is:
- * 20 + (6 + 18 + 28) = 72
- */
-#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
-
-/*
- * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h
- */
-#define WR_BUILD_STR_LEN 64
-
-/*
- * WARNING: All of these structs need to align any 64-bit types on
- * 64-bit boundaries! 64-bit types include u64.
- */
-
-/*
- * Clustercore Work Request Header. Be sensitive to field layout
- * and alignment.
- */
-struct c2wr_hdr {
- /* wqe_count is part of the cqe. It is put here so the
- * adapter can write to it while the wr is pending without
- * clobbering part of the wr. This word need not be dma'd
- * from the host to adapter by libccil, but we copy it anyway
- * to make the memcpy to the adapter better aligned.
- */
- __be32 wqe_count;
-
- /* Put these fields next so that later 32- and 64-bit
- * quantities are naturally aligned.
- */
- u8 id;
- u8 result; /* adapter -> host */
- u8 sge_count; /* host -> adapter */
- u8 flags; /* host -> adapter */
-
- u64 context;
-#ifdef CCMSGMAGIC
- u32 magic;
- u32 pad;
-#endif
-} __attribute__((packed));
-
-/*
- *------------------------ RNIC ------------------------
- */
-
-/*
- * WR_RNIC_OPEN
- */
-
-/*
- * Flags for the RNIC WRs
- */
-enum c2_rnic_flags {
- RNIC_IRD_STATIC = 0x0001,
- RNIC_ORD_STATIC = 0x0002,
- RNIC_QP_STATIC = 0x0004,
- RNIC_SRQ_SUPPORTED = 0x0008,
- RNIC_PBL_BLOCK_MODE = 0x0010,
- RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
- RNIC_CQ_OVF_DETECTED = 0x0040,
- RNIC_PRIV_MODE = 0x0080
-};
-
-struct c2wr_rnic_open_req {
- struct c2wr_hdr hdr;
- u64 user_context;
- __be16 flags; /* See enum c2_rnic_flags */
- __be16 port_num;
-} __attribute__((packed));
-
-struct c2wr_rnic_open_rep {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed));
-
-union c2wr_rnic_open {
- struct c2wr_rnic_open_req req;
- struct c2wr_rnic_open_rep rep;
-} __attribute__((packed));
-
-struct c2wr_rnic_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_QUERY
- */
-struct c2wr_rnic_query_rep {
- struct c2wr_hdr hdr;
- u64 user_context;
- __be32 vendor_id;
- __be32 part_number;
- __be32 hw_version;
- __be32 fw_ver_major;
- __be32 fw_ver_minor;
- __be32 fw_ver_patch;
- char fw_ver_build_str[WR_BUILD_STR_LEN];
- __be32 max_qps;
- __be32 max_qp_depth;
- u32 max_srq_depth;
- u32 max_send_sgl_depth;
- u32 max_rdma_sgl_depth;
- __be32 max_cqs;
- __be32 max_cq_depth;
- u32 max_cq_event_handlers;
- __be32 max_mrs;
- u32 max_pbl_depth;
- __be32 max_pds;
- __be32 max_global_ird;
- u32 max_global_ord;
- __be32 max_qp_ird;
- __be32 max_qp_ord;
- u32 flags;
- __be32 max_mws;
- u32 pbe_range_low;
- u32 pbe_range_high;
- u32 max_srqs;
- u32 page_size;
-} __attribute__((packed));
-
-union c2wr_rnic_query {
- struct c2wr_rnic_query_req req;
- struct c2wr_rnic_query_rep rep;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_GETCONFIG
- */
-
-struct c2wr_rnic_getconfig_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 option; /* see c2_getconfig_cmd_t */
- u64 reply_buf;
- u32 reply_buf_len;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_getconfig_rep {
- struct c2wr_hdr hdr;
- u32 option; /* see c2_getconfig_cmd_t */
- u32 count_len; /* length of the number of addresses configured */
-} __attribute__((packed)) ;
-
-union c2wr_rnic_getconfig {
- struct c2wr_rnic_getconfig_req req;
- struct c2wr_rnic_getconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_SETCONFIG
- */
-struct c2wr_rnic_setconfig_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- __be32 option; /* See c2_setconfig_cmd_t */
- /* variable data and pad. See c2_netaddr and c2_route */
- u8 data[0];
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_setconfig_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_setconfig {
- struct c2wr_rnic_setconfig_req req;
- struct c2wr_rnic_setconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_CLOSE
- */
-struct c2wr_rnic_close_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_close_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_close {
- struct c2wr_rnic_close_req req;
- struct c2wr_rnic_close_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ CQ ------------------------
- */
-struct c2wr_cq_create_req {
- struct c2wr_hdr hdr;
- __be64 shared_ht;
- u64 user_context;
- __be64 msg_pool;
- u32 rnic_handle;
- __be32 msg_size;
- __be32 depth;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_create_rep {
- struct c2wr_hdr hdr;
- __be32 mq_index;
- __be32 adapter_shared;
- u32 cq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_cq_create {
- struct c2wr_cq_create_req req;
- struct c2wr_cq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 cq_handle;
- u32 new_depth;
- u64 new_msg_pool;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_modify {
- struct c2wr_cq_modify_req req;
- struct c2wr_cq_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 cq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_destroy {
- struct c2wr_cq_destroy_req req;
- struct c2wr_cq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ PD ------------------------
- */
-struct c2wr_pd_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_alloc_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_alloc {
- struct c2wr_pd_alloc_req req;
- struct c2wr_pd_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_dealloc {
- struct c2wr_pd_dealloc_req req;
- struct c2wr_pd_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ SRQ ------------------------
- */
-struct c2wr_srq_create_req {
- struct c2wr_hdr hdr;
- u64 shared_ht;
- u64 user_context;
- u32 rnic_handle;
- u32 srq_depth;
- u32 srq_limit;
- u32 sgl_depth;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_create_rep {
- struct c2wr_hdr hdr;
- u32 srq_depth;
- u32 sgl_depth;
- u32 msg_size;
- u32 mq_index;
- u32 mq_start;
- u32 srq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_srq_create {
- struct c2wr_srq_create_req req;
- struct c2wr_srq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 srq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_srq_destroy {
- struct c2wr_srq_destroy_req req;
- struct c2wr_srq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ QP ------------------------
- */
-enum c2wr_qp_flags {
- QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */
- QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */
- QP_MW_BIND = 0x00000004, /* MWs enabled */
- QP_ZERO_STAG = 0x00000008, /* enabled? */
- QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */
- QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */
- /* enabled? */
-};
-
-struct c2wr_qp_create_req {
- struct c2wr_hdr hdr;
- __be64 shared_sq_ht;
- __be64 shared_rq_ht;
- u64 user_context;
- u32 rnic_handle;
- u32 sq_cq_handle;
- u32 rq_cq_handle;
- __be32 sq_depth;
- __be32 rq_depth;
- u32 srq_handle;
- u32 srq_limit;
- __be32 flags; /* see enum c2wr_qp_flags */
- __be32 send_sgl_depth;
- __be32 recv_sgl_depth;
- __be32 rdma_write_sgl_depth;
- __be32 ord;
- __be32 ird;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_create_rep {
- struct c2wr_hdr hdr;
- __be32 sq_depth;
- __be32 rq_depth;
- u32 send_sgl_depth;
- u32 recv_sgl_depth;
- u32 rdma_write_sgl_depth;
- u32 ord;
- u32 ird;
- __be32 sq_msg_size;
- __be32 sq_mq_index;
- __be32 sq_mq_start;
- __be32 rq_msg_size;
- __be32 rq_mq_index;
- __be32 rq_mq_start;
- u32 qp_handle;
-} __attribute__((packed)) ;
-
-union c2wr_qp_create {
- struct c2wr_qp_create_req req;
- struct c2wr_qp_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_rep {
- struct c2wr_hdr hdr;
- u64 user_context;
- u32 rnic_handle;
- u32 sq_depth;
- u32 rq_depth;
- u32 send_sgl_depth;
- u32 rdma_write_sgl_depth;
- u32 recv_sgl_depth;
- u32 ord;
- u32 ird;
- u16 qp_state;
- u16 flags; /* see c2wr_qp_flags_t */
- u32 qp_id;
- u32 local_addr;
- u32 remote_addr;
- u16 local_port;
- u16 remote_port;
- u32 terminate_msg_length; /* 0 if not present */
- u8 data[0];
- /* Terminate Message in-line here. */
-} __attribute__((packed)) ;
-
-union c2wr_qp_query {
- struct c2wr_qp_query_req req;
- struct c2wr_qp_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_req {
- struct c2wr_hdr hdr;
- u64 stream_msg;
- u32 stream_msg_length;
- u32 rnic_handle;
- u32 qp_handle;
- __be32 next_qp_state;
- __be32 ord;
- __be32 ird;
- __be32 sq_depth;
- __be32 rq_depth;
- u32 llp_ep_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_rep {
- struct c2wr_hdr hdr;
- u32 ord;
- u32 ird;
- u32 sq_depth;
- u32 rq_depth;
- u32 sq_msg_size;
- u32 sq_mq_index;
- u32 sq_mq_start;
- u32 rq_msg_size;
- u32 rq_mq_index;
- u32 rq_mq_start;
-} __attribute__((packed)) ;
-
-union c2wr_qp_modify {
- struct c2wr_qp_modify_req req;
- struct c2wr_qp_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_qp_destroy {
- struct c2wr_qp_destroy_req req;
- struct c2wr_qp_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can
- * only be posted when a QP is in IDLE state. After the connect request is
- * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
- * No synchronous reply from adapter to this WR. The results of
- * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
- * See c2wr_ae_active_connect_results_t
- */
-struct c2wr_qp_connect_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle;
- __be32 remote_addr;
- __be16 remote_port;
- u16 pad;
- __be32 private_data_length;
- u8 private_data[0]; /* Private data in-line. */
-} __attribute__((packed)) ;
-
-struct c2wr_qp_connect {
- struct c2wr_qp_connect_req req;
- /* no synchronous reply. */
-} __attribute__((packed)) ;
-
-
-/*
- *------------------------ MM ------------------------
- */
-
-struct c2wr_nsmr_stag_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pbl_depth;
- u32 pd_id;
- u32 flags;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_stag_alloc_rep {
- struct c2wr_hdr hdr;
- u32 pbl_depth;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_stag_alloc {
- struct c2wr_nsmr_stag_alloc_req req;
- struct c2wr_nsmr_stag_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_req {
- struct c2wr_hdr hdr;
- __be64 va;
- u32 rnic_handle;
- __be16 flags;
- u8 stag_key;
- u8 pad;
- u32 pd_id;
- __be32 pbl_depth;
- __be32 pbe_size;
- __be32 fbo;
- __be32 length;
- __be32 addrs_length;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_rep {
- struct c2wr_hdr hdr;
- u32 pbl_depth;
- __be32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_register {
- struct c2wr_nsmr_register_req req;
- struct c2wr_nsmr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- __be32 flags;
- __be32 stag_index;
- __be32 addrs_length;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_pbl {
- struct c2wr_nsmr_pbl_req req;
- struct c2wr_nsmr_pbl_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_rep {
- struct c2wr_hdr hdr;
- u8 stag_key;
- u8 pad[3];
- u32 pd_id;
- u32 flags;
- u32 pbl_depth;
-} __attribute__((packed)) ;
-
-union c2wr_mr_query {
- struct c2wr_mr_query_req req;
- struct c2wr_mr_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_rep {
- struct c2wr_hdr hdr;
- u8 stag_key;
- u8 pad[3];
- u32 pd_id;
- u32 flags;
-} __attribute__((packed)) ;
-
-union c2wr_mw_query {
- struct c2wr_mw_query_req req;
- struct c2wr_mw_query_rep rep;
-} __attribute__((packed)) ;
-
-
-struct c2wr_stag_dealloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- __be32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_stag_dealloc_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_stag_dealloc {
- struct c2wr_stag_dealloc_req req;
- struct c2wr_stag_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_req {
- struct c2wr_hdr hdr;
- u64 va;
- u32 rnic_handle;
- u16 flags;
- u8 stag_key;
- u8 pad;
- u32 stag_index;
- u32 pd_id;
- u32 pbl_depth;
- u32 pbe_size;
- u32 fbo;
- u32 length;
- u32 addrs_length;
- u32 pad1;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- u64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_rep {
- struct c2wr_hdr hdr;
- u32 pbl_depth;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_reregister {
- struct c2wr_nsmr_reregister_req req;
- struct c2wr_nsmr_reregister_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_req {
- struct c2wr_hdr hdr;
- u64 va;
- u32 rnic_handle;
- u16 flags;
- u8 stag_key;
- u8 pad;
- u32 stag_index;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_rep {
- struct c2wr_hdr hdr;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_smr_register {
- struct c2wr_smr_register_req req;
- struct c2wr_smr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_rep {
- struct c2wr_hdr hdr;
- u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_mw_alloc {
- struct c2wr_mw_alloc_req req;
- struct c2wr_mw_alloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ WRs -----------------------
- */
-
-struct c2wr_user_hdr {
- struct c2wr_hdr hdr; /* Has status and WR Type */
-} __attribute__((packed)) ;
-
-enum c2_qp_state {
- C2_QP_STATE_IDLE = 0x01,
- C2_QP_STATE_CONNECTING = 0x02,
- C2_QP_STATE_RTS = 0x04,
- C2_QP_STATE_CLOSING = 0x08,
- C2_QP_STATE_TERMINATE = 0x10,
- C2_QP_STATE_ERROR = 0x20,
-};
-
-/* Completion queue entry. */
-struct c2wr_ce {
- struct c2wr_hdr hdr; /* Has status and WR Type */
- u64 qp_user_context; /* c2_user_qp_t * */
- u32 qp_state; /* Current QP State */
- u32 handle; /* QPID or EP Handle */
- __be32 bytes_rcvd; /* valid for RECV WCs */
- u32 stag;
-} __attribute__((packed)) ;
-
-
-/*
- * Flags used for all post-sq WRs. These must fit in the flags
- * field of the struct c2wr_hdr (eight bits).
- */
-enum {
- SQ_SIGNALED = 0x01,
- SQ_READ_FENCE = 0x02,
- SQ_FENCE = 0x04,
-};
-
-/*
- * Common fields for all post-sq WRs. Namely the standard header and a
- * secondary header with fields common to all post-sq WRs.
- */
-struct c2_sq_hdr {
- struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * Same as above but for post-rq WRs.
- */
-struct c2_rq_hdr {
- struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * use the same struct for all sends.
- */
-struct c2wr_send_req {
- struct c2_sq_hdr sq_hdr;
- __be32 sge_len;
- __be32 remote_stag;
- u8 data[0]; /* SGE array */
-} __attribute__((packed));
-
-union c2wr_send {
- struct c2wr_send_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_write_req {
- struct c2_sq_hdr sq_hdr;
- __be64 remote_to;
- __be32 remote_stag;
- __be32 sge_len;
- u8 data[0]; /* SGE array */
-} __attribute__((packed));
-
-union c2wr_rdma_write {
- struct c2wr_rdma_write_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_read_req {
- struct c2_sq_hdr sq_hdr;
- __be64 local_to;
- __be64 remote_to;
- __be32 local_stag;
- __be32 remote_stag;
- __be32 length;
-} __attribute__((packed));
-
-union c2wr_rdma_read {
- struct c2wr_rdma_read_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_mw_bind_req {
- struct c2_sq_hdr sq_hdr;
- u64 va;
- u8 stag_key;
- u8 pad[3];
- u32 mw_stag_index;
- u32 mr_stag_index;
- u32 length;
- u32 flags;
-} __attribute__((packed));
-
-union c2wr_mw_bind {
- struct c2wr_mw_bind_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_nsmr_fastreg_req {
- struct c2_sq_hdr sq_hdr;
- u64 va;
- u8 stag_key;
- u8 pad[3];
- u32 stag_index;
- u32 pbe_size;
- u32 fbo;
- u32 length;
- u32 addrs_length;
- /* array of paddrs (must be aligned on a 64bit boundary) */
- u64 paddrs[0];
-} __attribute__((packed));
-
-union c2wr_nsmr_fastreg {
- struct c2wr_nsmr_fastreg_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_stag_invalidate_req {
- struct c2_sq_hdr sq_hdr;
- u8 stag_key;
- u8 pad[3];
- u32 stag_index;
-} __attribute__((packed));
-
-union c2wr_stag_invalidate {
- struct c2wr_stag_invalidate_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-union c2wr_sqwr {
- struct c2_sq_hdr sq_hdr;
- struct c2wr_send_req send;
- struct c2wr_send_req send_se;
- struct c2wr_send_req send_inv;
- struct c2wr_send_req send_se_inv;
- struct c2wr_rdma_write_req rdma_write;
- struct c2wr_rdma_read_req rdma_read;
- struct c2wr_mw_bind_req mw_bind;
- struct c2wr_nsmr_fastreg_req nsmr_fastreg;
- struct c2wr_stag_invalidate_req stag_inv;
-} __attribute__((packed));
-
-
-/*
- * RQ WRs
- */
-struct c2wr_rqwr {
- struct c2_rq_hdr rq_hdr;
- u8 data[0]; /* array of SGEs */
-} __attribute__((packed));
-
-union c2wr_recv {
- struct c2wr_rqwr req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-/*
- * All AEs start with this header. Most AEs only need to convey the
- * information in the header. Some, like LLP connection events, need
- * more info. The union typedef c2wr_ae_t has all the possible AEs.
- *
- * hdr.context is the user_context from the rnic_open WR. NULL if this
- * is not affiliated with an rnic.
- *
- * hdr.id is the AE identifier (e.g. CCAE_REMOTE_SHUTDOWN,
- * CCAE_LLP_CLOSE_COMPLETE)
- *
- * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
- *
- * user_context is the context passed down when the host created the resource.
- */
-struct c2wr_ae_hdr {
- struct c2wr_hdr hdr;
- u64 user_context; /* user context for this res. */
- __be32 resource_type; /* see enum c2_resource_indicator */
- __be32 resource; /* handle for resource */
- __be32 qp_state; /* current QP State */
-} __attribute__((packed));
-
-/*
- * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
- * the adapter moves the QP into RTS state
- */
-struct c2wr_ae_active_connect_results {
- struct c2wr_ae_hdr ae_hdr;
- __be32 laddr;
- __be32 raddr;
- __be16 lport;
- __be16 rport;
- __be32 private_data_length;
- u8 private_data[0]; /* data is in-line in the msg. */
-} __attribute__((packed));
-
-/*
- * When connections are established by the stack (and the private data
- * MPA frame is received), the adapter will generate an event to the host.
- * The details of the connection, any private data, and the new connection
- * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
- * AE queue:
- */
-struct c2wr_ae_connection_request {
- struct c2wr_ae_hdr ae_hdr;
- u32 cr_handle; /* connreq handle (sock ptr) */
- __be32 laddr;
- __be32 raddr;
- __be16 lport;
- __be16 rport;
- __be32 private_data_length;
- u8 private_data[0]; /* data is in-line in the msg. */
-} __attribute__((packed));
-
-union c2wr_ae {
- struct c2wr_ae_hdr ae_generic;
- struct c2wr_ae_active_connect_results ae_active_connect_results;
- struct c2wr_ae_connection_request ae_connection_request;
-} __attribute__((packed));
-
-struct c2wr_init_req {
- struct c2wr_hdr hdr;
- __be64 hint_count;
- __be64 q0_host_shared;
- __be64 q1_host_shared;
- __be64 q1_host_msg_pool;
- __be64 q2_host_shared;
- __be64 q2_host_msg_pool;
-} __attribute__((packed));
-
-struct c2wr_init_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_init {
- struct c2wr_init_req req;
- struct c2wr_init_rep rep;
-} __attribute__((packed));
-
-/*
- * For upgrading flash.
- */
-
-struct c2wr_flash_init_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
-} __attribute__((packed));
-
-struct c2wr_flash_init_rep {
- struct c2wr_hdr hdr;
- u32 adapter_flash_buf_offset;
- u32 adapter_flash_len;
-} __attribute__((packed));
-
-union c2wr_flash_init {
- struct c2wr_flash_init_req req;
- struct c2wr_flash_init_rep rep;
-} __attribute__((packed));
-
-struct c2wr_flash_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 len;
-} __attribute__((packed));
-
-struct c2wr_flash_rep {
- struct c2wr_hdr hdr;
- u32 status;
-} __attribute__((packed));
-
-union c2wr_flash {
- struct c2wr_flash_req req;
- struct c2wr_flash_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 size;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_rep {
- struct c2wr_hdr hdr;
- u32 offset; /* 0 if mem not available */
- u32 size; /* 0 if mem not available */
-} __attribute__((packed));
-
-union c2wr_buf_alloc {
- struct c2wr_buf_alloc_req req;
- struct c2wr_buf_alloc_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_free_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 offset; /* Must match value from alloc */
- u32 size; /* Must match value from alloc */
-} __attribute__((packed));
-
-struct c2wr_buf_free_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_buf_free {
- struct c2wr_buf_free_req req;
- struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_flash_write_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 offset;
- u32 size;
- u32 type;
- u32 flags;
-} __attribute__((packed));
-
-struct c2wr_flash_write_rep {
- struct c2wr_hdr hdr;
- u32 status;
-} __attribute__((packed));
-
-union c2wr_flash_write {
- struct c2wr_flash_write_req req;
- struct c2wr_flash_write_rep rep;
-} __attribute__((packed));
-
-/*
- * Messages for LLP connection setup.
- */
-
-/*
- * Listen Request. This allocates a listening endpoint to allow passive
- * connection setup. Newly established LLP connections are passed up
- * via an AE. See c2wr_ae_connection_request_t
- */
-struct c2wr_ep_listen_create_req {
- struct c2wr_hdr hdr;
- u64 user_context; /* returned in AEs. */
- u32 rnic_handle;
- __be32 local_addr; /* local addr, or 0 */
- __be16 local_port; /* 0 means "pick one" */
- u16 pad;
- __be32 backlog; /* traditional TCP listen backlog */
-} __attribute__((packed));
-
-struct c2wr_ep_listen_create_rep {
- struct c2wr_hdr hdr;
- u32 ep_handle; /* handle to new listening ep */
- u16 local_port; /* resulting port... */
- u16 pad;
-} __attribute__((packed));
-
-union c2wr_ep_listen_create {
- struct c2wr_ep_listen_create_req req;
- struct c2wr_ep_listen_create_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_ep_listen_destroy {
- struct c2wr_ep_listen_destroy_req req;
- struct c2wr_ep_listen_destroy_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_query_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_query_rep {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 local_addr;
- u32 remote_addr;
- u16 local_port;
- u16 remote_port;
-} __attribute__((packed));
-
-union c2wr_ep_query {
- struct c2wr_ep_query_req req;
- struct c2wr_ep_query_rep rep;
-} __attribute__((packed));
-
-
-/*
- * The host passes this down to indicate acceptance of a pending iWARP
- * connection. The cr_handle was obtained from the CONNECTION_REQUEST
- * AE passed up by the adapter. See c2wr_ae_connection_request_t.
- */
-struct c2wr_cr_accept_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 qp_handle; /* QP to bind to this LLP conn */
- u32 ep_handle; /* LLP handle to accept */
- __be32 private_data_length;
- u8 private_data[0]; /* data in-line in msg. */
-} __attribute__((packed));
-
-/*
- * adapter sends reply when private data is successfully submitted to
- * the LLP.
- */
-struct c2wr_cr_accept_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_accept {
- struct c2wr_cr_accept_req req;
- struct c2wr_cr_accept_rep rep;
-} __attribute__((packed));
-
-/*
- * The host sends this down if a given iWARP connection request was
- * rejected by the consumer. The cr_handle was obtained from a
- * previous c2wr_ae_connection_request_t AE sent by the adapter.
- */
-struct c2wr_cr_reject_req {
- struct c2wr_hdr hdr;
- u32 rnic_handle;
- u32 ep_handle; /* LLP handle to reject */
-} __attribute__((packed));
-
-/*
- * Dunno if this is needed, but we'll add it for now. The adapter will
- * send the reject_reply after the LLP endpoint has been destroyed.
- */
-struct c2wr_cr_reject_rep {
- struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_reject {
- struct c2wr_cr_reject_req req;
- struct c2wr_cr_reject_rep rep;
-} __attribute__((packed));
-
-/*
- * console command. Used to implement a debug console over the verbs
- * request and reply queues.
- */
-
-/*
- * Console request message. It contains:
- * - message hdr with id = CCWR_CONSOLE
- * - the physaddr/len of host memory to be used for the reply.
- * - the command string. eg: "netstat -s" or "zoneinfo"
- */
-struct c2wr_console_req {
- struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
- u64 reply_buf; /* pinned host buf for reply */
- u32 reply_buf_len; /* length of reply buffer */
- u8 command[0]; /* NUL terminated ascii string */
- /* containing the command req */
-} __attribute__((packed));
-
-/*
- * flags used in the console reply.
- */
-enum c2_console_flags {
- CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
-} __attribute__((packed));
-
-/*
- * Console reply message.
- * hdr.result contains the c2_status_t error if the reply was _not_ generated,
- * or C2_OK if the reply was generated.
- */
-struct c2wr_console_rep {
- struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
- u32 flags;
-} __attribute__((packed));
-
-union c2wr_console {
- struct c2wr_console_req req;
- struct c2wr_console_rep rep;
-} __attribute__((packed));
-
-
-/*
- * Giant union with all WRs. Makes life easier...
- */
-union c2wr {
- struct c2wr_hdr hdr;
- struct c2wr_user_hdr user_hdr;
- union c2wr_rnic_open rnic_open;
- union c2wr_rnic_query rnic_query;
- union c2wr_rnic_getconfig rnic_getconfig;
- union c2wr_rnic_setconfig rnic_setconfig;
- union c2wr_rnic_close rnic_close;
- union c2wr_cq_create cq_create;
- union c2wr_cq_modify cq_modify;
- union c2wr_cq_destroy cq_destroy;
- union c2wr_pd_alloc pd_alloc;
- union c2wr_pd_dealloc pd_dealloc;
- union c2wr_srq_create srq_create;
- union c2wr_srq_destroy srq_destroy;
- union c2wr_qp_create qp_create;
- union c2wr_qp_query qp_query;
- union c2wr_qp_modify qp_modify;
- union c2wr_qp_destroy qp_destroy;
- struct c2wr_qp_connect qp_connect;
- union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
- union c2wr_nsmr_register nsmr_register;
- union c2wr_nsmr_pbl nsmr_pbl;
- union c2wr_mr_query mr_query;
- union c2wr_mw_query mw_query;
- union c2wr_stag_dealloc stag_dealloc;
- union c2wr_sqwr sqwr;
- struct c2wr_rqwr rqwr;
- struct c2wr_ce ce;
- union c2wr_ae ae;
- union c2wr_init init;
- union c2wr_ep_listen_create ep_listen_create;
- union c2wr_ep_listen_destroy ep_listen_destroy;
- union c2wr_cr_accept cr_accept;
- union c2wr_cr_reject cr_reject;
- union c2wr_console console;
- union c2wr_flash_init flash_init;
- union c2wr_flash flash;
- union c2wr_buf_alloc buf_alloc;
- union c2wr_buf_free buf_free;
- union c2wr_flash_write flash_write;
-} __attribute__((packed));
-
-
-/*
- * Accessors for the wr fields that are packed together tightly to
- * reduce the wr message size. The wr arguments are void* so that
- * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
- * in the struct c2wr union can be passed in.
- */
-static __inline__ u8 c2_wr_get_id(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->id;
-}
-static __inline__ void c2_wr_set_id(void *wr, u8 id)
-{
- ((struct c2wr_hdr *) wr)->id = id;
-}
-static __inline__ u8 c2_wr_get_result(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->result;
-}
-static __inline__ void c2_wr_set_result(void *wr, u8 result)
-{
- ((struct c2wr_hdr *) wr)->result = result;
-}
-static __inline__ u8 c2_wr_get_flags(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->flags;
-}
-static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
-{
- ((struct c2wr_hdr *) wr)->flags = flags;
-}
-static __inline__ u8 c2_wr_get_sge_count(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->sge_count;
-}
-static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
-{
- ((struct c2wr_hdr *) wr)->sge_count = sge_count;
-}
-static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
-{
- return ((struct c2wr_hdr *) wr)->wqe_count;
-}
-static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
-{
- ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
-}
-
-#endif /* _C2_WR_H_ */
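
Illustration (not part of the removed file): the accessor comment above describes how the tightly packed header fields are read and written through void * helpers so that any WR variant can be passed. A minimal sketch of a caller, assuming CCWR_CONSOLE is the WR id value the header defines elsewhere for console requests:

static void example_fill_console_req(struct c2wr_console_req *req,
				     u64 reply_buf, u32 reply_buf_len)
{
	/* stamp the WR type and clear the result via the void * accessors */
	c2_wr_set_id(req, CCWR_CONSOLE);
	c2_wr_set_result(req, 0);

	/* request-specific fields are still filled in directly */
	req->reply_buf = reply_buf;         /* pinned host buffer for the reply */
	req->reply_buf_len = reply_buf_len;
}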
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644
index 3fadd2ad6426..000000000000
--- a/drivers/staging/rdma/ehca/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config INFINIBAND_EHCA
- tristate "eHCA support"
- depends on IBMEBUS
- ---help---
- This driver supports the deprecated IBM pSeries eHCA InfiniBand
- adapter.
-
- To compile the driver as a module, choose M here. The module
- will be called ib_ehca.
-
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644
index 74d284e46a40..000000000000
--- a/drivers/staging/rdma/ehca/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# Authors: Heiko J Schick <schickhj@de.ibm.com>
-# Christoph Raisch <raisch@de.ibm.com>
-# Joachim Fenkes <fenkes@de.ibm.com>
-#
-# Copyright (c) 2005 IBM Corporation
-#
-# All rights reserved.
-#
-# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
-
-obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
-
-ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
- ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
- ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
-
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644
index 199a4a600142..000000000000
--- a/drivers/staging/rdma/ehca/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-9/2015
-
-The ehca driver has been deprecated and moved to drivers/staging/rdma.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644
index 94e088c2d989..000000000000
--- a/drivers/staging/rdma/ehca/ehca_av.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * address vector functions
- *
- * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Khadija Souissi <souissik@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *av_cache;
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
- enum ib_rate path_rate, u32 *ipd)
-{
- int path = ib_rate_to_mult(path_rate);
- int link, ret;
- struct ib_port_attr pa;
-
- if (path_rate == IB_RATE_PORT_CURRENT) {
- *ipd = 0;
- return 0;
- }
-
- if (unlikely(path < 0)) {
- ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
- path_rate);
- return -EINVAL;
- }
-
- ret = ehca_query_port(&shca->ib_device, port, &pa);
- if (unlikely(ret < 0)) {
- ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
- return ret;
- }
-
- link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
-
- if (path >= link)
- /* no need to throttle if path faster than link */
- *ipd = 0;
- else
- /* IPD = round((link / path) - 1) */
- *ipd = ((link + (path >> 1)) / path) - 1;
-
- return 0;
-}
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
- int ret;
- struct ehca_av *av;
- struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
- ib_device);
-
- av = kmem_cache_alloc(av_cache, GFP_KERNEL);
- if (!av) {
- ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
- pd, ah_attr);
- return ERR_PTR(-ENOMEM);
- }
-
- av->av.sl = ah_attr->sl;
- av->av.dlid = ah_attr->dlid;
- av->av.slid_path_bits = ah_attr->src_path_bits;
-
- if (ehca_static_rate < 0) {
- u32 ipd;
-
- if (ehca_calc_ipd(shca, ah_attr->port_num,
- ah_attr->static_rate, &ipd)) {
- ret = -EINVAL;
- goto create_ah_exit1;
- }
- av->av.ipd = ipd;
- } else
- av->av.ipd = ehca_static_rate;
-
- av->av.lnh = ah_attr->ah_flags;
- av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
- ah_attr->grh.traffic_class);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
- ah_attr->grh.flow_label);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
- ah_attr->grh.hop_limit);
- av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
- /* set sgid in grh.word_1 */
- if (ah_attr->ah_flags & IB_AH_GRH) {
- int rc;
- struct ib_port_attr port_attr;
- union ib_gid gid;
-
- memset(&port_attr, 0, sizeof(port_attr));
- rc = ehca_query_port(pd->device, ah_attr->port_num,
- &port_attr);
- if (rc) { /* invalid port number */
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid port number "
- "ehca_query_port() returned %x "
- "pd=%p ah_attr=%p", rc, pd, ah_attr);
- goto create_ah_exit1;
- }
- memset(&gid, 0, sizeof(gid));
- rc = ehca_query_gid(pd->device,
- ah_attr->port_num,
- ah_attr->grh.sgid_index, &gid);
- if (rc) {
- ret = -EINVAL;
- ehca_err(pd->device, "Failed to retrieve sgid "
- "ehca_query_gid() returned %x "
- "pd=%p ah_attr=%p", rc, pd, ah_attr);
- goto create_ah_exit1;
- }
- memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
- }
- av->av.pmtu = shca->max_mtu;
-
- /* dgid comes in grh.word_3 */
- memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
- sizeof(ah_attr->grh.dgid));
-
- return &av->ib_ah;
-
-create_ah_exit1:
- kmem_cache_free(av_cache, av);
-
- return ERR_PTR(ret);
-}
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
- struct ehca_av *av;
- struct ehca_ud_av new_ehca_av;
- struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
- ib_device);
-
- memset(&new_ehca_av, 0, sizeof(new_ehca_av));
- new_ehca_av.sl = ah_attr->sl;
- new_ehca_av.dlid = ah_attr->dlid;
- new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
- new_ehca_av.ipd = ah_attr->static_rate;
- new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
- (ah_attr->ah_flags & IB_AH_GRH) > 0);
- new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
- ah_attr->grh.traffic_class);
- new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
- ah_attr->grh.flow_label);
- new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
- ah_attr->grh.hop_limit);
- new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
-
- /* set sgid in grh.word_1 */
- if (ah_attr->ah_flags & IB_AH_GRH) {
- int rc;
- struct ib_port_attr port_attr;
- union ib_gid gid;
-
- memset(&port_attr, 0, sizeof(port_attr));
- rc = ehca_query_port(ah->device, ah_attr->port_num,
- &port_attr);
- if (rc) { /* invalid port number */
- ehca_err(ah->device, "Invalid port number "
- "ehca_query_port() returned %x "
- "ah=%p ah_attr=%p port_num=%x",
- rc, ah, ah_attr, ah_attr->port_num);
- return -EINVAL;
- }
- memset(&gid, 0, sizeof(gid));
- rc = ehca_query_gid(ah->device,
- ah_attr->port_num,
- ah_attr->grh.sgid_index, &gid);
- if (rc) {
- ehca_err(ah->device, "Failed to retrieve sgid "
- "ehca_query_gid() returned %x "
- "ah=%p ah_attr=%p port_num=%x "
- "sgid_index=%x",
- rc, ah, ah_attr, ah_attr->port_num,
- ah_attr->grh.sgid_index);
- return -EINVAL;
- }
- memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
- }
-
- new_ehca_av.pmtu = shca->max_mtu;
-
- memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
- sizeof(ah_attr->grh.dgid));
-
- av = container_of(ah, struct ehca_av, ib_ah);
- av->av = new_ehca_av;
-
- return 0;
-}
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
- struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-
- memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
- sizeof(ah_attr->grh.dgid));
- ah_attr->sl = av->av.sl;
-
- ah_attr->dlid = av->av.dlid;
-
- ah_attr->src_path_bits = av->av.slid_path_bits;
- ah_attr->static_rate = av->av.ipd;
- ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
- ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
- av->av.grh.word_0);
- ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
- av->av.grh.word_0);
- ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
- av->av.grh.word_0);
-
- return 0;
-}
-
-int ehca_destroy_ah(struct ib_ah *ah)
-{
- kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
-
- return 0;
-}
-
-int ehca_init_av_cache(void)
-{
- av_cache = kmem_cache_create("ehca_cache_av",
- sizeof(struct ehca_av), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!av_cache)
- return -ENOMEM;
- return 0;
-}
-
-void ehca_cleanup_av_cache(void)
-{
- kmem_cache_destroy(av_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644
index e8c3387d7aaa..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes.h
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Struct definition for eHCA internal structures
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_H__
-#define __EHCA_CLASSES_H__
-
-struct ehca_module;
-struct ehca_qp;
-struct ehca_cq;
-struct ehca_eq;
-struct ehca_mr;
-struct ehca_mw;
-struct ehca_pd;
-struct ehca_av;
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-
-#ifdef CONFIG_PPC64
-#include "ehca_classes_pSeries.h"
-#endif
-#include "ipz_pt_fn.h"
-#include "ehca_qes.h"
-#include "ehca_irq.h"
-
-#define EHCA_EQE_CACHE_SIZE 20
-#define EHCA_MAX_NUM_QUEUES 0xffff
-
-struct ehca_eqe_cache_entry {
- struct ehca_eqe *eqe;
- struct ehca_cq *cq;
-};
-
-struct ehca_eq {
- u32 length;
- struct ipz_queue ipz_queue;
- struct ipz_eq_handle ipz_eq_handle;
- struct work_struct work;
- struct h_galpas galpas;
- int is_initialized;
- struct ehca_pfeq pf;
- spinlock_t spinlock;
- struct tasklet_struct interrupt_task;
- u32 ist;
- spinlock_t irq_spinlock;
- struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
-};
-
-struct ehca_sma_attr {
- u16 lid, lmc, sm_sl, sm_lid;
- u16 pkey_tbl_len, pkeys[16];
-};
-
-struct ehca_sport {
- struct ib_cq *ibcq_aqp1;
- struct ib_qp *ibqp_sqp[2];
- /* lock to serialize modify_qp() calls for sqp in normal
- * and irq path (when event PORT_ACTIVE is received first time)
- */
- spinlock_t mod_sqp_lock;
- enum ib_port_state port_state;
- struct ehca_sma_attr saved_attr;
- u32 pma_qp_nr;
-};
-
-#define HCA_CAP_MR_PGSIZE_4K 0x80000000
-#define HCA_CAP_MR_PGSIZE_64K 0x40000000
-#define HCA_CAP_MR_PGSIZE_1M 0x20000000
-#define HCA_CAP_MR_PGSIZE_16M 0x10000000
-
-struct ehca_shca {
- struct ib_device ib_device;
- struct platform_device *ofdev;
- u8 num_ports;
- int hw_level;
- struct list_head shca_list;
- struct ipz_adapter_handle ipz_hca_handle;
- struct ehca_sport sport[2];
- struct ehca_eq eq;
- struct ehca_eq neq;
- struct ehca_mr *maxmr;
- struct ehca_pd *pd;
- struct h_galpas galpas;
- struct mutex modify_mutex;
- u64 hca_cap;
- /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
- u32 hca_cap_mr_pgsize;
- int max_mtu;
- int max_num_qps;
- int max_num_cqs;
- atomic_t num_cqs;
- atomic_t num_qps;
-};
-
-struct ehca_pd {
- struct ib_pd ib_pd;
- struct ipz_pd fw_pd;
- /* small queue mgmt */
- struct mutex lock;
- struct list_head free[2];
- struct list_head full[2];
-};
-
-enum ehca_ext_qp_type {
- EQPT_NORMAL = 0,
- EQPT_LLQP = 1,
- EQPT_SRQBASE = 2,
- EQPT_SRQ = 3,
-};
-
-/* struct to cache modify_qp()'s parms for GSI/SMI qp */
-struct ehca_mod_qp_parm {
- int mask;
- struct ib_qp_attr attr;
-};
-
-#define EHCA_MOD_QP_PARM_MAX 4
-
-#define QMAP_IDX_MASK 0xFFFFULL
-
-/* struct for tracking if cqes have been reported to the application */
-struct ehca_qmap_entry {
- u16 app_wr_id;
- u8 reported;
- u8 cqe_req;
-};
-
-struct ehca_queue_map {
- struct ehca_qmap_entry *map;
- unsigned int entries;
- unsigned int tail;
- unsigned int left_to_poll;
- unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
-};
-
-/* function to calculate the next index for the qmap */
-static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
-{
- unsigned int temp = cur_index + 1;
- return (temp == limit) ? 0 : temp;
-}
-
-struct ehca_qp {
- union {
- struct ib_qp ib_qp;
- struct ib_srq ib_srq;
- };
- u32 qp_type;
- enum ehca_ext_qp_type ext_type;
- enum ib_qp_state state;
- struct ipz_queue ipz_squeue;
- struct ehca_queue_map sq_map;
- struct ipz_queue ipz_rqueue;
- struct ehca_queue_map rq_map;
- struct h_galpas galpas;
- u32 qkey;
- u32 real_qp_num;
- u32 token;
- spinlock_t spinlock_s;
- spinlock_t spinlock_r;
- u32 sq_max_inline_data_size;
- struct ipz_qp_handle ipz_qp_handle;
- struct ehca_pfqp pf;
- struct ib_qp_init_attr init_attr;
- struct ehca_cq *send_cq;
- struct ehca_cq *recv_cq;
- unsigned int sqerr_purgeflag;
- struct hlist_node list_entries;
- /* array to cache modify_qp()'s parms for GSI/SMI qp */
- struct ehca_mod_qp_parm *mod_qp_parm;
- int mod_qp_parm_idx;
- /* mmap counter for resources mapped into user space */
- u32 mm_count_squeue;
- u32 mm_count_rqueue;
- u32 mm_count_galpa;
- /* unsolicited ack circumvention */
- int unsol_ack_circ;
- int mtu_shift;
- u32 message_count;
- u32 packet_count;
- atomic_t nr_events; /* events seen */
- wait_queue_head_t wait_completion;
- int mig_armed;
- struct list_head sq_err_node;
- struct list_head rq_err_node;
-};
-
-#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
-#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
-#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
-
-/* must be power of 2 */
-#define QP_HASHTAB_LEN 8
-
-struct ehca_cq {
- struct ib_cq ib_cq;
- struct ipz_queue ipz_queue;
- struct h_galpas galpas;
- spinlock_t spinlock;
- u32 cq_number;
- u32 token;
- u32 nr_of_entries;
- struct ipz_cq_handle ipz_cq_handle;
- struct ehca_pfcq pf;
- spinlock_t cb_lock;
- struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
- struct list_head entry;
- u32 nr_callbacks; /* #events assigned to cpu by scaling code */
- atomic_t nr_events; /* #events seen */
- wait_queue_head_t wait_completion;
- spinlock_t task_lock;
- /* mmap counter for resources mapped into user space */
- u32 mm_count_queue;
- u32 mm_count_galpa;
- struct list_head sqp_err_list;
- struct list_head rqp_err_list;
-};
-
-enum ehca_mr_flag {
- EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
- EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
-};
-
-struct ehca_mr {
- union {
- struct ib_mr ib_mr; /* must always be first in ehca_mr */
- struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
- } ib;
- struct ib_umem *umem;
- spinlock_t mrlock;
-
- enum ehca_mr_flag flags;
- u32 num_kpages; /* number of kernel pages */
- u32 num_hwpages; /* number of hw pages to form MR */
- u64 hwpage_size; /* hw page size used for this MR */
- int acl; /* ACL (stored here for usage in reregister) */
- u64 *start; /* virtual start address (stored here for */
- /* usage in reregister) */
- u64 size; /* size (stored here for usage in reregister) */
- u32 fmr_page_size; /* page size for FMR */
- u32 fmr_max_pages; /* max pages for FMR */
- u32 fmr_max_maps; /* max outstanding maps for FMR */
- u32 fmr_map_cnt; /* map counter for FMR */
- /* fw specific data */
- struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
- struct h_galpas galpas;
-};
-
-struct ehca_mw {
- struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
- spinlock_t mwlock;
-
- u8 never_bound; /* indication MW was never bound */
- struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
- struct h_galpas galpas;
-};
-
-enum ehca_mr_pgi_type {
- EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
- * ehca_rereg_phys_mr,
- * ehca_reg_internal_maxmr */
- EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
- EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
-};
-
-struct ehca_mr_pginfo {
- enum ehca_mr_pgi_type type;
- u64 num_kpages;
- u64 kpage_cnt;
- u64 hwpage_size; /* hw page size used for this MR */
- u64 num_hwpages; /* number of hw pages */
- u64 hwpage_cnt; /* counter for hw pages */
- u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
-
- union {
- struct { /* type EHCA_MR_PGI_PHYS section */
- u64 addr;
- u16 size;
- } phy;
- struct { /* type EHCA_MR_PGI_USER section */
- struct ib_umem *region;
- struct scatterlist *next_sg;
- u64 next_nmap;
- } usr;
- struct { /* type EHCA_MR_PGI_FMR section */
- u64 fmr_pgsize;
- u64 *page_list;
- u64 next_listelem;
- } fmr;
- } u;
-};
-
-/* output parameters for MR/FMR hipz calls */
-struct ehca_mr_hipzout_parms {
- struct ipz_mrmw_handle handle;
- u32 lkey;
- u32 rkey;
- u64 len;
- u64 vaddr;
- u32 acl;
-};
-
-/* output parameters for MW hipz calls */
-struct ehca_mw_hipzout_parms {
- struct ipz_mrmw_handle handle;
- u32 rkey;
-};
-
-struct ehca_av {
- struct ib_ah ib_ah;
- struct ehca_ud_av av;
-};
-
-struct ehca_ucontext {
- struct ib_ucontext ib_ucontext;
-};
-
-int ehca_init_pd_cache(void);
-void ehca_cleanup_pd_cache(void);
-int ehca_init_cq_cache(void);
-void ehca_cleanup_cq_cache(void);
-int ehca_init_qp_cache(void);
-void ehca_cleanup_qp_cache(void);
-int ehca_init_av_cache(void);
-void ehca_cleanup_av_cache(void);
-int ehca_init_mrmw_cache(void);
-void ehca_cleanup_mrmw_cache(void);
-int ehca_init_small_qp_cache(void);
-void ehca_cleanup_small_qp_cache(void);
-
-extern rwlock_t ehca_qp_idr_lock;
-extern rwlock_t ehca_cq_idr_lock;
-extern struct idr ehca_qp_idr;
-extern struct idr ehca_cq_idr;
-extern spinlock_t shca_list_lock;
-
-extern int ehca_static_rate;
-extern int ehca_port_act_time;
-extern bool ehca_use_hp_mr;
-extern bool ehca_scaling_code;
-extern int ehca_lock_hcalls;
-extern int ehca_nr_ports;
-extern int ehca_max_cq;
-extern int ehca_max_qp;
-
-struct ipzu_queue_resp {
- u32 qe_size; /* queue entry size */
- u32 act_nr_of_sg;
- u32 queue_length; /* queue length allocated in bytes */
- u32 pagesize;
- u32 toggle_state;
- u32 offset; /* save offset within a page for small_qp */
-};
-
-struct ehca_create_cq_resp {
- u32 cq_number;
- u32 token;
- struct ipzu_queue_resp ipz_queue;
- u32 fw_handle_ofs;
- u32 dummy;
-};
-
-struct ehca_create_qp_resp {
- u32 qp_num;
- u32 token;
- u32 qp_type;
- u32 ext_type;
- u32 qkey;
- /* qp_num assigned by ehca: sqp0/1 may have been assigned different numbers */
- u32 real_qp_num;
- u32 fw_handle_ofs;
- u32 dummy;
- struct ipzu_queue_resp ipz_squeue;
- struct ipzu_queue_resp ipz_rqueue;
-};
-
-struct ehca_alloc_cq_parms {
- u32 nr_cqe;
- u32 act_nr_of_entries;
- u32 act_pages;
- struct ipz_eq_handle eq_handle;
-};
-
-enum ehca_service_type {
- ST_RC = 0,
- ST_UC = 1,
- ST_RD = 2,
- ST_UD = 3,
-};
-
-enum ehca_ll_comp_flags {
- LLQP_SEND_COMP = 0x20,
- LLQP_RECV_COMP = 0x40,
- LLQP_COMP_MASK = 0x60,
-};
-
-struct ehca_alloc_queue_parms {
- /* input parameters */
- int max_wr;
- int max_sge;
- int page_size;
- int is_small;
-
- /* output parameters */
- u16 act_nr_wqes;
- u8 act_nr_sges;
- u32 queue_size; /* bytes for small queues, pages otherwise */
-};
-
-struct ehca_alloc_qp_parms {
- struct ehca_alloc_queue_parms squeue;
- struct ehca_alloc_queue_parms rqueue;
-
- /* input parameters */
- enum ehca_service_type servicetype;
- int qp_storage;
- int sigtype;
- enum ehca_ext_qp_type ext_type;
- enum ehca_ll_comp_flags ll_comp_flags;
- int ud_av_l_key_ctl;
-
- u32 token;
- struct ipz_eq_handle eq_handle;
- struct ipz_pd pd;
- struct ipz_cq_handle send_cq_handle, recv_cq_handle;
-
- u32 srq_qpn, srq_token, srq_limit;
-
- /* output parameters */
- u32 real_qp_num;
- struct ipz_qp_handle qp_handle;
- struct h_galpas galpas;
-};
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
-
-#endif
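
Illustration (not part of the removed header): next_index() above implements ring-buffer wraparound for the queue maps, and QP_HASHTAB_LEN being a power of two is what lets the CQ code (ehca_cq.c, below) pick a hash bucket with a mask instead of a modulo. A minimal sketch:

static void example_index_and_bucket(void)
{
	unsigned int entries = 4;

	/* next_index() wraps: 0 -> 1 -> 2 -> 3 -> 0 -> ... */
	unsigned int idx = next_index(3, entries);      /* idx == 0 */

	/*
	 * QP_HASHTAB_LEN is 8 (a power of two), so
	 * qp_num & (QP_HASHTAB_LEN - 1) == qp_num % QP_HASHTAB_LEN,
	 * e.g. 27 & 7 == 27 % 8 == 3, as used in ehca_cq_assign_qp().
	 */
	unsigned int key = 27 & (QP_HASHTAB_LEN - 1);   /* key == 3 */

	(void)idx;
	(void)key;
}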
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644
index 689c35786dd2..000000000000
--- a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * pSeries interface definitions
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_PSERIES_H__
-#define __EHCA_CLASSES_PSERIES_H__
-
-#include "hcp_phyp.h"
-#include "ipz_pt_fn.h"
-
-
-struct ehca_pfqp {
- struct ipz_qpt sqpt;
- struct ipz_qpt rqpt;
-};
-
-struct ehca_pfcq {
- struct ipz_qpt qpt;
- u32 cqnr;
-};
-
-struct ehca_pfeq {
- struct ipz_qpt qpt;
- struct h_galpa galpa;
- u32 eqnr;
-};
-
-struct ipz_adapter_handle {
- u64 handle;
-};
-
-struct ipz_cq_handle {
- u64 handle;
-};
-
-struct ipz_eq_handle {
- u64 handle;
-};
-
-struct ipz_qp_handle {
- u64 handle;
-};
-struct ipz_mrmw_handle {
- u64 handle;
-};
-
-struct ipz_pd {
- u32 value;
-};
-
-struct hcp_modify_qp_control_block {
- u32 qkey; /* 00 */
- u32 rdd; /* reliable datagram domain */
- u32 send_psn; /* 02 */
- u32 receive_psn; /* 03 */
- u32 prim_phys_port; /* 04 */
- u32 alt_phys_port; /* 05 */
- u32 prim_p_key_idx; /* 06 */
- u32 alt_p_key_idx; /* 07 */
- u32 rdma_atomic_ctrl; /* 08 */
- u32 qp_state; /* 09 */
- u32 reserved_10; /* 10 */
- u32 rdma_nr_atomic_resp_res; /* 11 */
- u32 path_migration_state; /* 12 */
- u32 rdma_atomic_outst_dest_qp; /* 13 */
- u32 dest_qp_nr; /* 14 */
- u32 min_rnr_nak_timer_field; /* 15 */
- u32 service_level; /* 16 */
- u32 send_grh_flag; /* 17 */
- u32 retry_count; /* 18 */
- u32 timeout; /* 19 */
- u32 path_mtu; /* 20 */
- u32 max_static_rate; /* 21 */
- u32 dlid; /* 22 */
- u32 rnr_retry_count; /* 23 */
- u32 source_path_bits; /* 24 */
- u32 traffic_class; /* 25 */
- u32 hop_limit; /* 26 */
- u32 source_gid_idx; /* 27 */
- u32 flow_label; /* 28 */
- u32 reserved_29; /* 29 */
- union { /* 30 */
- u64 dw[2];
- u8 byte[16];
- } dest_gid;
- u32 service_level_al; /* 34 */
- u32 send_grh_flag_al; /* 35 */
- u32 retry_count_al; /* 36 */
- u32 timeout_al; /* 37 */
- u32 max_static_rate_al; /* 38 */
- u32 dlid_al; /* 39 */
- u32 rnr_retry_count_al; /* 40 */
- u32 source_path_bits_al; /* 41 */
- u32 traffic_class_al; /* 42 */
- u32 hop_limit_al; /* 43 */
- u32 source_gid_idx_al; /* 44 */
- u32 flow_label_al; /* 45 */
- u32 reserved_46; /* 46 */
- u32 reserved_47; /* 47 */
- union { /* 48 */
- u64 dw[2];
- u8 byte[16];
- } dest_gid_al;
- u32 max_nr_outst_send_wr; /* 52 */
- u32 max_nr_outst_recv_wr; /* 53 */
- u32 disable_ete_credit_check; /* 54 */
- u32 qp_number; /* 55 */
- u64 send_queue_handle; /* 56 */
- u64 recv_queue_handle; /* 58 */
- u32 actual_nr_sges_in_sq_wqe; /* 60 */
- u32 actual_nr_sges_in_rq_wqe; /* 61 */
- u32 qp_enable; /* 62 */
- u32 curr_srq_limit; /* 63 */
- u64 qp_aff_asyn_ev_log_reg; /* 64 */
- u64 shared_rq_hndl; /* 66 */
- u64 trigg_doorbell_qp_hndl; /* 68 */
- u32 reserved_70_127[58]; /* 70 */
-};
-
-#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
-#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
-#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
-#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
-#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
-#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
-#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
-#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
-#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
-#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
-#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
-#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
-#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
-#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
-#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
-#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
-#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
-#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
-#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
-#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
-#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
-#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
-#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
-#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
-#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
-#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
-#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
-#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
-#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
-
-#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644
index 1aa7931fe860..000000000000
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Completion queue handling
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *cq_cache;
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
-{
- unsigned int qp_num = qp->real_qp_num;
- unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
- unsigned long flags;
-
- spin_lock_irqsave(&cq->spinlock, flags);
- hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
- spin_unlock_irqrestore(&cq->spinlock, flags);
-
- ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
- cq->cq_number, qp_num);
-
- return 0;
-}
-
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
-{
- int ret = -EINVAL;
- unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
- struct hlist_node *iter;
- struct ehca_qp *qp;
- unsigned long flags;
-
- spin_lock_irqsave(&cq->spinlock, flags);
- hlist_for_each(iter, &cq->qp_hashtab[key]) {
- qp = hlist_entry(iter, struct ehca_qp, list_entries);
- if (qp->real_qp_num == real_qp_num) {
- hlist_del(iter);
- ehca_dbg(cq->ib_cq.device,
- "removed qp from cq .cq_num=%x real_qp_num=%x",
- cq->cq_number, real_qp_num);
- ret = 0;
- break;
- }
- }
- spin_unlock_irqrestore(&cq->spinlock, flags);
- if (ret)
- ehca_err(cq->ib_cq.device,
- "qp not found cq_num=%x real_qp_num=%x",
- cq->cq_number, real_qp_num);
-
- return ret;
-}
-
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
-{
- struct ehca_qp *ret = NULL;
- unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
- struct hlist_node *iter;
- struct ehca_qp *qp;
- hlist_for_each(iter, &cq->qp_hashtab[key]) {
- qp = hlist_entry(iter, struct ehca_qp, list_entries);
- if (qp->real_qp_num == real_qp_num) {
- ret = qp;
- break;
- }
- }
- return ret;
-}
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int cqe = attr->cqe;
- static const u32 additional_cqe = 20;
- struct ib_cq *cq;
- struct ehca_cq *my_cq;
- struct ehca_shca *shca =
- container_of(device, struct ehca_shca, ib_device);
- struct ipz_adapter_handle adapter_handle;
- struct ehca_alloc_cq_parms param; /* h_call's out parameters */
- struct h_galpa gal;
- void *vpage;
- u32 counter;
- u64 rpage, cqx_fec, h_ret;
- int rc, i;
- unsigned long flags;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
- return ERR_PTR(-EINVAL);
-
- if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
- ehca_err(device, "Unable to create CQ, max number of %i "
- "CQs reached.", shca->max_num_cqs);
- ehca_err(device, "To increase the maximum number of CQs "
- "use the number_of_cqs module parameter.\n");
- return ERR_PTR(-ENOSPC);
- }
-
- my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
- if (!my_cq) {
- ehca_err(device, "Out of memory for ehca_cq struct device=%p",
- device);
- atomic_dec(&shca->num_cqs);
- return ERR_PTR(-ENOMEM);
- }
-
- memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
-
- spin_lock_init(&my_cq->spinlock);
- spin_lock_init(&my_cq->cb_lock);
- spin_lock_init(&my_cq->task_lock);
- atomic_set(&my_cq->nr_events, 0);
- init_waitqueue_head(&my_cq->wait_completion);
-
- cq = &my_cq->ib_cq;
-
- adapter_handle = shca->ipz_hca_handle;
- param.eq_handle = shca->eq.ipz_eq_handle;
-
- idr_preload(GFP_KERNEL);
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- idr_preload_end();
-
- if (rc < 0) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Can't allocate new idr entry. device=%p",
- device);
- goto create_cq_exit1;
- }
- my_cq->token = rc;
-
- /*
- * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
- * for receiving errors CQEs.
- */
- param.nr_cqe = cqe + additional_cqe;
- h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
-
- if (h_ret != H_SUCCESS) {
- ehca_err(device, "hipz_h_alloc_resource_cq() failed "
- "h_ret=%lli device=%p", h_ret, device);
- cq = ERR_PTR(ehca2ib_return_code(h_ret));
- goto create_cq_exit2;
- }
-
- rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
- EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
- if (!rc) {
- ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
- rc, device);
- cq = ERR_PTR(-EINVAL);
- goto create_cq_exit3;
- }
-
- for (counter = 0; counter < param.act_pages; counter++) {
- vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
- if (!vpage) {
- ehca_err(device, "ipz_qpageit_get_inc() "
- "returns NULL device=%p", device);
- cq = ERR_PTR(-EAGAIN);
- goto create_cq_exit4;
- }
- rpage = __pa(vpage);
-
- h_ret = hipz_h_register_rpage_cq(adapter_handle,
- my_cq->ipz_cq_handle,
- &my_cq->pf,
- 0,
- 0,
- rpage,
- 1,
- my_cq->galpas.
- kernel);
-
- if (h_ret < H_SUCCESS) {
- ehca_err(device, "hipz_h_register_rpage_cq() failed "
- "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
- "act_pages=%i", my_cq, my_cq->cq_number,
- h_ret, counter, param.act_pages);
- cq = ERR_PTR(-EINVAL);
- goto create_cq_exit4;
- }
-
- if (counter == (param.act_pages - 1)) {
- vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
- if ((h_ret != H_SUCCESS) || vpage) {
- ehca_err(device, "Registration of pages not "
- "complete ehca_cq=%p cq_num=%x "
- "h_ret=%lli", my_cq, my_cq->cq_number,
- h_ret);
- cq = ERR_PTR(-EAGAIN);
- goto create_cq_exit4;
- }
- } else {
- if (h_ret != H_PAGE_REGISTERED) {
- ehca_err(device, "Registration of page failed "
- "ehca_cq=%p cq_num=%x h_ret=%lli "
- "counter=%i act_pages=%i",
- my_cq, my_cq->cq_number,
- h_ret, counter, param.act_pages);
- cq = ERR_PTR(-ENOMEM);
- goto create_cq_exit4;
- }
- }
- }
-
- ipz_qeit_reset(&my_cq->ipz_queue);
-
- gal = my_cq->galpas.kernel;
- cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
- ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
- my_cq, my_cq->cq_number, cqx_fec);
-
- my_cq->ib_cq.cqe = my_cq->nr_of_entries =
- param.act_nr_of_entries - additional_cqe;
- my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
-
- for (i = 0; i < QP_HASHTAB_LEN; i++)
- INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
-
- INIT_LIST_HEAD(&my_cq->sqp_err_list);
- INIT_LIST_HEAD(&my_cq->rqp_err_list);
-
- if (context) {
- struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
- struct ehca_create_cq_resp resp;
- memset(&resp, 0, sizeof(resp));
- resp.cq_number = my_cq->cq_number;
- resp.token = my_cq->token;
- resp.ipz_queue.qe_size = ipz_queue->qe_size;
- resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
- resp.ipz_queue.queue_length = ipz_queue->queue_length;
- resp.ipz_queue.pagesize = ipz_queue->pagesize;
- resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
- resp.fw_handle_ofs = (u32)
- (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- ehca_err(device, "Copy to udata failed.");
- cq = ERR_PTR(-EFAULT);
- goto create_cq_exit4;
- }
- }
-
- return cq;
-
-create_cq_exit4:
- ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-
-create_cq_exit3:
- h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
- if (h_ret != H_SUCCESS)
- ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
- "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
-
-create_cq_exit2:
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- idr_remove(&ehca_cq_idr, my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-create_cq_exit1:
- kmem_cache_free(cq_cache, my_cq);
-
- atomic_dec(&shca->num_cqs);
- return cq;
-}
-
-int ehca_destroy_cq(struct ib_cq *cq)
-{
- u64 h_ret;
- struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- int cq_num = my_cq->cq_number;
- struct ib_device *device = cq->device;
- struct ehca_shca *shca = container_of(device, struct ehca_shca,
- ib_device);
- struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
- unsigned long flags;
-
- if (cq->uobject) {
- if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
- ehca_err(device, "Resources still referenced in "
- "user space cq_num=%x", my_cq->cq_number);
- return -EINVAL;
- }
- }
-
- /*
- * remove the CQ from the idr first to make sure
- * no more interrupt tasklets will touch this CQ
- */
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- idr_remove(&ehca_cq_idr, my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
- /* now wait until all pending events have completed */
- wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
-
- /* nobody's using our CQ any longer -- we can destroy it */
- h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
- if (h_ret == H_R_STATE) {
- /* cq in err: read err data and destroy it forcibly */
- ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
- "state. Try to delete it forcibly.",
- my_cq, cq_num, my_cq->ipz_cq_handle.handle);
- ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
- h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
- if (h_ret == H_SUCCESS)
- ehca_dbg(device, "cq_num=%x deleted successfully.",
- cq_num);
- }
- if (h_ret != H_SUCCESS) {
- ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
- "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
- return ehca2ib_return_code(h_ret);
- }
- ipz_queue_dtor(NULL, &my_cq->ipz_queue);
- kmem_cache_free(cq_cache, my_cq);
-
- atomic_dec(&shca->num_cqs);
- return 0;
-}
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
- /* TODO: proper resize needs to be done */
- ehca_err(cq->device, "not implemented yet");
-
- return -EFAULT;
-}
-
-int ehca_init_cq_cache(void)
-{
- cq_cache = kmem_cache_create("ehca_cache_cq",
- sizeof(struct ehca_cq), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!cq_cache)
- return -ENOMEM;
- return 0;
-}
-
-void ehca_cleanup_cq_cache(void)
-{
- kmem_cache_destroy(cq_cache);
-}
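
A minimal, standalone sketch of the hash-bucket scheme used by ehca_cq_assign_qp()/ehca_cq_get_qp() above: the bucket index is the QP number masked with QP_HASHTAB_LEN - 1, which only works because the table size is a power of two. All names below are invented for illustration and are not the driver's.

/* illustrative userspace sketch, not part of the removed file */
#include <stdio.h>
#include <stdlib.h>

#define QP_HASHTAB_LEN 32               /* must be a power of two */

struct qp {
	unsigned int real_qp_num;
	struct qp *next;                /* bucket chain */
};

static struct qp *qp_hashtab[QP_HASHTAB_LEN];

static void qp_insert(struct qp *qp)
{
	unsigned int key = qp->real_qp_num & (QP_HASHTAB_LEN - 1);

	qp->next = qp_hashtab[key];
	qp_hashtab[key] = qp;
}

static struct qp *qp_lookup(unsigned int real_qp_num)
{
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN - 1);
	struct qp *qp;

	for (qp = qp_hashtab[key]; qp; qp = qp->next)
		if (qp->real_qp_num == real_qp_num)
			return qp;
	return NULL;
}

int main(void)
{
	struct qp a = { .real_qp_num = 0x1042 };

	qp_insert(&a);
	printf("found: %p\n", (void *)qp_lookup(0x1042));
	return 0;
}

Masking with a power-of-two length avoids a modulo on the lookup path, which is why the real table keeps QP_HASHTAB_LEN a power of two.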
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644
index 90da6747d395..000000000000
--- a/drivers/staging/rdma/ehca/ehca_eq.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Event queue handling
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_qes.h"
-#include "hcp_if.h"
-#include "ipz_pt_fn.h"
-
-int ehca_create_eq(struct ehca_shca *shca,
- struct ehca_eq *eq,
- const enum ehca_eq_type type, const u32 length)
-{
- int ret;
- u64 h_ret;
- u32 nr_pages;
- u32 i;
- void *vpage;
- struct ib_device *ib_dev = &shca->ib_device;
-
- spin_lock_init(&eq->spinlock);
- spin_lock_init(&eq->irq_spinlock);
- eq->is_initialized = 0;
-
- if (type != EHCA_EQ && type != EHCA_NEQ) {
- ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
- return -EINVAL;
- }
- if (!length) {
- ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
- return -EINVAL;
- }
-
- h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
- &eq->pf,
- type,
- length,
- &eq->ipz_eq_handle,
- &eq->length,
- &nr_pages, &eq->ist);
-
- if (h_ret != H_SUCCESS) {
- ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
- return -EINVAL;
- }
-
- ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
- EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
- if (!ret) {
- ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
- goto create_eq_exit1;
- }
-
- for (i = 0; i < nr_pages; i++) {
- u64 rpage;
-
- vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
- if (!vpage)
- goto create_eq_exit2;
-
- rpage = __pa(vpage);
- h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
- eq->ipz_eq_handle,
- &eq->pf,
- 0, 0, rpage, 1);
-
- if (i == (nr_pages - 1)) {
- /* last page */
- vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
- if (h_ret != H_SUCCESS || vpage)
- goto create_eq_exit2;
- } else {
- if (h_ret != H_PAGE_REGISTERED)
- goto create_eq_exit2;
- }
- }
-
- ipz_qeit_reset(&eq->ipz_queue);
-
- /* register interrupt handlers and initialize work queues */
- if (type == EHCA_EQ) {
- tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
-
- ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
- 0, "ehca_eq",
- (void *)shca);
- if (ret < 0)
- ehca_err(ib_dev, "Can't map interrupt handler.");
- } else if (type == EHCA_NEQ) {
- tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
-
- ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
- 0, "ehca_neq",
- (void *)shca);
- if (ret < 0)
- ehca_err(ib_dev, "Can't map interrupt handler.");
- }
-
- eq->is_initialized = 1;
-
- return 0;
-
-create_eq_exit2:
- ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-create_eq_exit1:
- hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
- return -EINVAL;
-}
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
- unsigned long flags;
- void *eqe;
-
- spin_lock_irqsave(&eq->spinlock, flags);
- eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
- spin_unlock_irqrestore(&eq->spinlock, flags);
-
- return eqe;
-}
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
- unsigned long flags;
- u64 h_ret;
-
- ibmebus_free_irq(eq->ist, (void *)shca);
-
- spin_lock_irqsave(&shca_list_lock, flags);
- eq->is_initialized = 0;
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- tasklet_kill(&eq->interrupt_task);
-
- h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't free EQ resources.");
- return -EINVAL;
- }
- ipz_queue_dtor(NULL, &eq->ipz_queue);
-
- return 0;
-}
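
The create_cq_exit*/create_eq_exit* labels above follow the kernel's usual goto-based unwinding: resources are acquired in order and, on failure, control jumps to the label that releases exactly what has been acquired so far, in reverse order. A minimal, standalone sketch of that pattern follows; the resource names are invented for illustration.

/* illustrative userspace sketch, not part of the removed file */
#include <stdlib.h>

struct ctx {
	void *fw_handle;        /* stands in for the hypervisor resource */
	void *pages;            /* stands in for the queue pages */
};

static int ctx_setup(struct ctx *c)
{
	c->fw_handle = malloc(64);
	if (!c->fw_handle)
		goto err_handle;

	c->pages = malloc(4096);
	if (!c->pages)
		goto err_pages;

	return 0;

err_pages:
	free(c->fw_handle);    /* undo only what was already acquired */
err_handle:
	return -1;
}

static void ctx_teardown(struct ctx *c)
{
	free(c->pages);
	free(c->fw_handle);
}

int main(void)
{
	struct ctx c;

	if (ctx_setup(&c))
		return 1;
	ctx_teardown(&c);
	return 0;
}

Each label frees only the resources acquired before the failing step, so every failure path leaves the context exactly as it found it.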
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644
index e8b1bb65797a..000000000000
--- a/drivers/staging/rdma/ehca/ehca_hca.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * HCA query functions
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/gfp.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static unsigned int limit_uint(unsigned int value)
-{
- return min_t(unsigned int, value, INT_MAX);
-}
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- int i, ret = 0;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
- ib_device);
- struct hipz_query_hca *rblock;
-
- static const u32 cap_mapping[] = {
- IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
- IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
- IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
- IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
- IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
- IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
- IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
- IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
- IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
- IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
- IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
- };
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query device properties");
- ret = -EINVAL;
- goto query_device1;
- }
-
- memset(props, 0, sizeof(struct ib_device_attr));
- props->page_size_cap = shca->hca_cap_mr_pgsize;
- props->fw_ver = rblock->hw_ver;
- props->max_mr_size = rblock->max_mr_size;
- props->vendor_id = rblock->vendor_id >> 8;
- props->vendor_part_id = rblock->vendor_part_id >> 16;
- props->hw_ver = rblock->hw_ver;
- props->max_qp = limit_uint(rblock->max_qp);
- props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
- props->max_sge = limit_uint(rblock->max_sge);
- props->max_sge_rd = limit_uint(rblock->max_sge_rd);
- props->max_cq = limit_uint(rblock->max_cq);
- props->max_cqe = limit_uint(rblock->max_cqe);
- props->max_mr = limit_uint(rblock->max_mr);
- props->max_mw = limit_uint(rblock->max_mw);
- props->max_pd = limit_uint(rblock->max_pd);
- props->max_ah = limit_uint(rblock->max_ah);
- props->max_ee = limit_uint(rblock->max_rd_ee_context);
- props->max_rdd = limit_uint(rblock->max_rd_domain);
- props->max_fmr = limit_uint(rblock->max_mr);
- props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
- props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
- props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
- props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
- props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
-
- if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
- props->max_srq = limit_uint(props->max_qp);
- props->max_srq_wr = limit_uint(props->max_qp_wr);
- props->max_srq_sge = 3;
- }
-
- props->max_pkeys = 16;
- /* Some FW versions say 0 here; insert sensible value in that case */
- props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
- min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
- props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
- props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
- props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
- props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
- props->max_total_mcast_qp_attach
- = limit_uint(rblock->max_total_mcast_qp_attach);
-
- /* translate device capabilities */
- props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
- for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
- if (rblock->hca_cap_indicators & cap_mapping[i + 1])
- props->device_cap_flags |= cap_mapping[i];
-
-query_device1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
-{
- switch (fw_mtu) {
- case 0x1:
- return IB_MTU_256;
- case 0x2:
- return IB_MTU_512;
- case 0x3:
- return IB_MTU_1024;
- case 0x4:
- return IB_MTU_2048;
- case 0x5:
- return IB_MTU_4096;
- default:
- ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
- fw_mtu);
- return 0;
- }
-}
-
-static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
-{
- switch (vl_cap) {
- case 0x1:
- return 1;
- case 0x2:
- return 2;
- case 0x3:
- return 4;
- case 0x4:
- return 8;
- case 0x5:
- return 15;
- default:
- ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
- vl_cap);
- return 0;
- }
-}
-
-int ehca_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
- ib_device);
- struct hipz_query_port *rblock;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_port1;
- }
-
- memset(props, 0, sizeof(struct ib_port_attr));
-
- props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
- props->port_cap_flags = rblock->capability_mask;
- props->gid_tbl_len = rblock->gid_tbl_len;
- if (rblock->max_msg_sz)
- props->max_msg_sz = rblock->max_msg_sz;
- else
- props->max_msg_sz = 0x1 << 31;
- props->bad_pkey_cntr = rblock->bad_pkey_cntr;
- props->qkey_viol_cntr = rblock->qkey_viol_cntr;
- props->pkey_tbl_len = rblock->pkey_tbl_len;
- props->lid = rblock->lid;
- props->sm_lid = rblock->sm_lid;
- props->lmc = rblock->lmc;
- props->sm_sl = rblock->sm_sl;
- props->subnet_timeout = rblock->subnet_timeout;
- props->init_type_reply = rblock->init_type_reply;
- props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
-
- if (rblock->state && rblock->phys_width) {
- props->phys_state = rblock->phys_pstate;
- props->state = rblock->phys_state;
- props->active_width = rblock->phys_width;
- props->active_speed = rblock->phys_speed;
- } else {
- /* old firmware releases don't report physical
- * port info, so use default values
- */
- props->phys_state = 5;
- props->state = rblock->state;
- props->active_width = IB_WIDTH_12X;
- props->active_speed = IB_SPEED_SDR;
- }
-
-query_port1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-int ehca_query_sma_attr(struct ehca_shca *shca,
- u8 port, struct ehca_sma_attr *attr)
-{
- int ret = 0;
- u64 h_ret;
- struct hipz_query_port *rblock;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_sma_attr1;
- }
-
- memset(attr, 0, sizeof(struct ehca_sma_attr));
-
- attr->lid = rblock->lid;
- attr->lmc = rblock->lmc;
- attr->sm_sl = rblock->sm_sl;
- attr->sm_lid = rblock->sm_lid;
-
- attr->pkey_tbl_len = rblock->pkey_tbl_len;
- memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
-
-query_sma_attr1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca;
- struct hipz_query_port *rblock;
-
- shca = container_of(ibdev, struct ehca_shca, ib_device);
- if (index > 16) {
- ehca_err(&shca->ib_device, "Invalid index: %x.", index);
- return -EINVAL;
- }
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_pkey1;
- }
-
- memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
-
-query_pkey1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
- ib_device);
- struct hipz_query_port *rblock;
-
- if (index < 0 || index > 255) {
- ehca_err(&shca->ib_device, "Invalid index: %x.", index);
- return -EINVAL;
- }
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto query_gid1;
- }
-
- memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
- memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
-
-query_gid1:
- ehca_free_fw_ctrlblock(rblock);
-
- return ret;
-}
-
-static const u32 allowed_port_caps = (
- IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP);
-
-int ehca_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- int ret = 0;
- struct ehca_shca *shca;
- struct hipz_query_port *rblock;
- u32 cap;
- u64 hret;
-
- shca = container_of(ibdev, struct ehca_shca, ib_device);
- if ((props->set_port_cap_mask | props->clr_port_cap_mask)
- & ~allowed_port_caps) {
- ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
- "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
- props->clr_port_cap_mask, allowed_port_caps);
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&shca->modify_mutex))
- return -ERESTARTSYS;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto modify_port1;
- }
-
- hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
- if (hret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query port properties");
- ret = -EINVAL;
- goto modify_port2;
- }
-
- cap = (rblock->capability_mask | props->set_port_cap_mask)
- & ~props->clr_port_cap_mask;
-
- hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
- cap, props->init_type, port_modify_mask);
- if (hret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
- hret);
- ret = -EINVAL;
- }
-
-modify_port2:
- ehca_free_fw_ctrlblock(rblock);
-
-modify_port1:
- mutex_unlock(&shca->modify_mutex);
-
- return ret;
-}
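
ehca_query_device() above translates firmware capability bits into exported device flags by walking a flat array of (flag, capability-bit) pairs two entries at a time. A minimal, standalone sketch of that table walk follows; the flag and bit values below are invented for illustration.

/* illustrative userspace sketch, not part of the removed file */
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* exported flags (made-up values for the example) */
#define DEV_RESIZE_MAX_WR  (1u << 0)
#define DEV_BAD_PKEY_CNTR  (1u << 1)

/* firmware capability bits (made-up values for the example) */
#define CAP_WQE_RESIZE     (1u << 4)
#define CAP_BAD_P_KEY_CTR  (1u << 5)

static const uint32_t cap_mapping[] = {
	DEV_RESIZE_MAX_WR, CAP_WQE_RESIZE,
	DEV_BAD_PKEY_CNTR, CAP_BAD_P_KEY_CTR,
};

static uint32_t translate_caps(uint32_t fw_caps)
{
	uint32_t flags = 0;
	size_t i;

	/* even entries are exported flags, odd entries the matching fw bits */
	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (fw_caps & cap_mapping[i + 1])
			flags |= cap_mapping[i];
	return flags;
}

int main(void)
{
	printf("flags=%#x\n", (unsigned)translate_caps(CAP_WQE_RESIZE));
	return 0;
}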
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644
index 8615d7cf7e01..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Functions for EQs, NEQs and interrupts
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/smpboot.h>
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
-#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
-#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
-#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
-#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
-
-#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
-#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
-#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
-#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
-#define NEQE_SPECIFIC_EVENT EHCA_BMASK_IBM(16, 23)
-
-#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
-#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
-
-static void queue_comp_task(struct ehca_cq *__cq);
-
-static struct ehca_comp_pool *pool;
-
-static inline void comp_event_callback(struct ehca_cq *cq)
-{
- if (!cq->ib_cq.comp_handler)
- return;
-
- spin_lock(&cq->cb_lock);
- cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
- spin_unlock(&cq->cb_lock);
-
- return;
-}
-
-static void print_error_data(struct ehca_shca *shca, void *data,
- u64 *rblock, int length)
-{
- u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
- u64 resource = rblock[1];
-
- switch (type) {
- case 0x1: /* Queue Pair */
- {
- struct ehca_qp *qp = (struct ehca_qp *)data;
-
- /* only print error data if AER is set */
- if (rblock[6] == 0)
- return;
-
- ehca_err(&shca->ib_device,
- "QP 0x%x (resource=%llx) has errors.",
- qp->ib_qp.qp_num, resource);
- break;
- }
- case 0x4: /* Completion Queue */
- {
- struct ehca_cq *cq = (struct ehca_cq *)data;
-
- ehca_err(&shca->ib_device,
- "CQ 0x%x (resource=%llx) has errors.",
- cq->cq_number, resource);
- break;
- }
- default:
- ehca_err(&shca->ib_device,
- "Unknown error type: %llx on %s.",
- type, shca->ib_device.name);
- break;
- }
-
- ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
- ehca_err(&shca->ib_device, "EHCA ----- error data begin "
- "---------------------------------------------------");
- ehca_dmp(rblock, length, "resource=%llx", resource);
- ehca_err(&shca->ib_device, "EHCA ----- error data end "
- "----------------------------------------------------");
-
- return;
-}
-
-int ehca_error_data(struct ehca_shca *shca, void *data,
- u64 resource)
-{
-
- unsigned long ret;
- u64 *rblock;
- unsigned long block_count;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
- ret = -ENOMEM;
- goto error_data1;
- }
-
- /* rblock must be 4K aligned and should be 4K large */
- ret = hipz_h_error_data(shca->ipz_hca_handle,
- resource,
- rblock,
- &block_count);
-
- if (ret == H_R_STATE)
- ehca_err(&shca->ib_device,
- "No error data is available: %llx.", resource);
- else if (ret == H_SUCCESS) {
- int length;
-
- length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
-
- if (length > EHCA_PAGESIZE)
- length = EHCA_PAGESIZE;
-
- print_error_data(shca, data, rblock, length);
- } else
- ehca_err(&shca->ib_device,
- "Error data could not be fetched: %llx", resource);
-
- ehca_free_fw_ctrlblock(rblock);
-
-error_data1:
- return ret;
-
-}
-
-static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
- enum ib_event_type event_type)
-{
- struct ib_event event;
-
- /* PATH_MIG without the QP ever having been armed is false alarm */
- if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
- return;
-
- event.device = &shca->ib_device;
- event.event = event_type;
-
- if (qp->ext_type == EQPT_SRQ) {
- if (!qp->ib_srq.event_handler)
- return;
-
- event.element.srq = &qp->ib_srq;
- qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
- } else {
- if (!qp->ib_qp.event_handler)
- return;
-
- event.element.qp = &qp->ib_qp;
- qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
- }
-}
-
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
- enum ib_event_type event_type, int fatal)
-{
- struct ehca_qp *qp;
- u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
- read_lock(&ehca_qp_idr_lock);
- qp = idr_find(&ehca_qp_idr, token);
- if (qp)
- atomic_inc(&qp->nr_events);
- read_unlock(&ehca_qp_idr_lock);
-
- if (!qp)
- return;
-
- if (fatal)
- ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
-
- dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
- IB_EVENT_SRQ_ERR : event_type);
-
- /*
- * eHCA only processes one WQE at a time for SRQ base QPs,
- * so the last WQE has been processed as soon as the QP enters
- * error state.
- */
- if (fatal && qp->ext_type == EQPT_SRQBASE)
- dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
-
- if (atomic_dec_and_test(&qp->nr_events))
- wake_up(&qp->wait_completion);
- return;
-}
-
-static void cq_event_callback(struct ehca_shca *shca,
- u64 eqe)
-{
- struct ehca_cq *cq;
- u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
-
- read_lock(&ehca_cq_idr_lock);
- cq = idr_find(&ehca_cq_idr, token);
- if (cq)
- atomic_inc(&cq->nr_events);
- read_unlock(&ehca_cq_idr_lock);
-
- if (!cq)
- return;
-
- ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
-
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
-
- return;
-}
-
-static void parse_identifier(struct ehca_shca *shca, u64 eqe)
-{
- u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
-
- switch (identifier) {
- case 0x02: /* path migrated */
- qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
- break;
- case 0x03: /* communication established */
- qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
- break;
- case 0x04: /* send queue drained */
- qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
- break;
- case 0x05: /* QP error */
- case 0x06: /* QP error */
- qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
- break;
- case 0x07: /* CQ error */
- case 0x08: /* CQ error */
- cq_event_callback(shca, eqe);
- break;
- case 0x09: /* MRMWPTE error */
- ehca_err(&shca->ib_device, "MRMWPTE error.");
- break;
- case 0x0A: /* port event */
- ehca_err(&shca->ib_device, "Port event.");
- break;
- case 0x0B: /* MR access error */
- ehca_err(&shca->ib_device, "MR access error.");
- break;
- case 0x0C: /* EQ error */
- ehca_err(&shca->ib_device, "EQ error.");
- break;
- case 0x0D: /* P/Q_Key mismatch */
- ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
- break;
- case 0x10: /* sampling complete */
- ehca_err(&shca->ib_device, "Sampling complete.");
- break;
- case 0x11: /* unaffiliated access error */
- ehca_err(&shca->ib_device, "Unaffiliated access error.");
- break;
- case 0x12: /* path migrating */
- ehca_err(&shca->ib_device, "Path migrating.");
- break;
- case 0x13: /* interface trace stopped */
- ehca_err(&shca->ib_device, "Interface trace stopped.");
- break;
- case 0x14: /* first error capture info available */
- ehca_info(&shca->ib_device, "First error capture available");
- break;
- case 0x15: /* SRQ limit reached */
- qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
- break;
- default:
- ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
- identifier, shca->ib_device.name);
- break;
- }
-
- return;
-}
-
-static void dispatch_port_event(struct ehca_shca *shca, int port_num,
- enum ib_event_type type, const char *msg)
-{
- struct ib_event event;
-
- ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
- event.device = &shca->ib_device;
- event.event = type;
- event.element.port_num = port_num;
- ib_dispatch_event(&event);
-}
-
-static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
-{
- struct ehca_sma_attr new_attr;
- struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
-
- ehca_query_sma_attr(shca, port_num, &new_attr);
-
- if (new_attr.sm_sl != old_attr->sm_sl ||
- new_attr.sm_lid != old_attr->sm_lid)
- dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
- "SM changed");
-
- if (new_attr.lid != old_attr->lid ||
- new_attr.lmc != old_attr->lmc)
- dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
- "LID changed");
-
- if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
- memcmp(new_attr.pkeys, old_attr->pkeys,
- sizeof(u16) * new_attr.pkey_tbl_len))
- dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
- "P_Key changed");
-
- *old_attr = new_attr;
-}
-
-/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
-static int replay_modify_qp(struct ehca_sport *sport)
-{
- int aqp1_destroyed;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-
- aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
-
- if (sport->ibqp_sqp[IB_QPT_SMI])
- ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
- if (!aqp1_destroyed)
- ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-
- spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
- return aqp1_destroyed;
-}
-
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
-{
- u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
- u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
- u8 spec_event;
- struct ehca_sport *sport = &shca->sport[port - 1];
-
- switch (ec) {
- case 0x30: /* port availability change */
- if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
- /* only replay modify_qp calls in autodetect mode;
- * if AQP1 was destroyed, the port is already down
- * again and we can drop the event.
- */
- if (ehca_nr_ports < 0)
- if (replay_modify_qp(sport))
- break;
-
- sport->port_state = IB_PORT_ACTIVE;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
- "is active");
- ehca_query_sma_attr(shca, port, &sport->saved_attr);
- } else {
- sport->port_state = IB_PORT_DOWN;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
- "is inactive");
- }
- break;
- case 0x31:
- /* port configuration change
- * disruptive change is caused by
- * LID, PKEY or SM change
- */
- if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
- ehca_warn(&shca->ib_device, "disruptive port "
- "%d configuration change", port);
-
- sport->port_state = IB_PORT_DOWN;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
- "is inactive");
-
- sport->port_state = IB_PORT_ACTIVE;
- dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
- "is active");
- ehca_query_sma_attr(shca, port,
- &sport->saved_attr);
- } else
- notify_port_conf_change(shca, port);
- break;
- case 0x32: /* adapter malfunction */
- ehca_err(&shca->ib_device, "Adapter malfunction.");
- break;
- case 0x33: /* trace stopped */
- ehca_err(&shca->ib_device, "Traced stopped.");
- break;
- case 0x34: /* util async event */
- spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
- if (spec_event == 0x80) /* client reregister required */
- dispatch_port_event(shca, port,
- IB_EVENT_CLIENT_REREGISTER,
- "client reregister req.");
- else
- ehca_warn(&shca->ib_device, "Unknown util async "
- "event %x on port %x", spec_event, port);
- break;
- default:
- ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
- ec, shca->ib_device.name);
- break;
- }
-
- return;
-}
-
-static inline void reset_eq_pending(struct ehca_cq *cq)
-{
- u64 CQx_EP;
- struct h_galpa gal = cq->galpas.kernel;
-
- hipz_galpa_store_cq(gal, cqx_ep, 0x0);
- CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
-
- return;
-}
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
-{
- struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
- tasklet_hi_schedule(&shca->neq.interrupt_task);
-
- return IRQ_HANDLED;
-}
-
-void ehca_tasklet_neq(unsigned long data)
-{
- struct ehca_shca *shca = (struct ehca_shca*)data;
- struct ehca_eqe *eqe;
- u64 ret;
-
- eqe = ehca_poll_eq(shca, &shca->neq);
-
- while (eqe) {
- if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
- parse_ec(shca, eqe->entry);
-
- eqe = ehca_poll_eq(shca, &shca->neq);
- }
-
- ret = hipz_h_reset_event(shca->ipz_hca_handle,
- shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
-
- if (ret != H_SUCCESS)
- ehca_err(&shca->ib_device, "Can't clear notification events.");
-
- return;
-}
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
-{
- struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
- tasklet_hi_schedule(&shca->eq.interrupt_task);
-
- return IRQ_HANDLED;
-}
-
-
-static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
-{
- u64 eqe_value;
- u32 token;
- struct ehca_cq *cq;
-
- eqe_value = eqe->entry;
- ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
- if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
- ehca_dbg(&shca->ib_device, "Got completion event");
- token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- read_lock(&ehca_cq_idr_lock);
- cq = idr_find(&ehca_cq_idr, token);
- if (cq)
- atomic_inc(&cq->nr_events);
- read_unlock(&ehca_cq_idr_lock);
- if (cq == NULL) {
- ehca_err(&shca->ib_device,
- "Invalid eqe for non-existing cq token=%x",
- token);
- return;
- }
- reset_eq_pending(cq);
- if (ehca_scaling_code)
- queue_comp_task(cq);
- else {
- comp_event_callback(cq);
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
- }
- } else {
- ehca_dbg(&shca->ib_device, "Got non completion event");
- parse_identifier(shca, eqe_value);
- }
-}
-
-void ehca_process_eq(struct ehca_shca *shca, int is_irq)
-{
- struct ehca_eq *eq = &shca->eq;
- struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
- u64 eqe_value, ret;
- int eqe_cnt, i;
- int eq_empty = 0;
-
- spin_lock(&eq->irq_spinlock);
- if (is_irq) {
- const int max_query_cnt = 100;
- int query_cnt = 0;
- int int_state = 1;
- do {
- int_state = hipz_h_query_int_state(
- shca->ipz_hca_handle, eq->ist);
- query_cnt++;
- iosync();
- } while (int_state && query_cnt < max_query_cnt);
- if (unlikely((query_cnt == max_query_cnt)))
- ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
- int_state, query_cnt);
- }
-
- /* read out all eqes */
- eqe_cnt = 0;
- do {
- u32 token;
- eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
- if (!eqe_cache[eqe_cnt].eqe)
- break;
- eqe_value = eqe_cache[eqe_cnt].eqe->entry;
- if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
- token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- read_lock(&ehca_cq_idr_lock);
- eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
- if (eqe_cache[eqe_cnt].cq)
- atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
- read_unlock(&ehca_cq_idr_lock);
- if (!eqe_cache[eqe_cnt].cq) {
- ehca_err(&shca->ib_device,
- "Invalid eqe for non-existing cq "
- "token=%x", token);
- continue;
- }
- } else
- eqe_cache[eqe_cnt].cq = NULL;
- eqe_cnt++;
- } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
- if (!eqe_cnt) {
- if (is_irq)
- ehca_dbg(&shca->ib_device,
- "No eqe found for irq event");
- goto unlock_irq_spinlock;
- } else if (!is_irq) {
- ret = hipz_h_eoi(eq->ist);
- if (ret != H_SUCCESS)
- ehca_err(&shca->ib_device,
- "bad return code EOI -rc = %lld\n", ret);
- ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
- }
- if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
- ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
- /* enable irq for new packets */
- for (i = 0; i < eqe_cnt; i++) {
- if (eq->eqe_cache[i].cq)
- reset_eq_pending(eq->eqe_cache[i].cq);
- }
- /* check eq */
- spin_lock(&eq->spinlock);
- eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
- spin_unlock(&eq->spinlock);
- /* call completion handler for cached eqes */
- for (i = 0; i < eqe_cnt; i++)
- if (eq->eqe_cache[i].cq) {
- if (ehca_scaling_code)
- queue_comp_task(eq->eqe_cache[i].cq);
- else {
- struct ehca_cq *cq = eq->eqe_cache[i].cq;
- comp_event_callback(cq);
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
- }
- } else {
- ehca_dbg(&shca->ib_device, "Got non completion event");
- parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
- }
- /* poll eq if not empty */
- if (eq_empty)
- goto unlock_irq_spinlock;
- do {
- struct ehca_eqe *eqe;
- eqe = ehca_poll_eq(shca, &shca->eq);
- if (!eqe)
- break;
- process_eqe(shca, eqe);
- } while (1);
-
-unlock_irq_spinlock:
- spin_unlock(&eq->irq_spinlock);
-}
-
-void ehca_tasklet_eq(unsigned long data)
-{
- ehca_process_eq((struct ehca_shca*)data, 1);
-}
-
-static int find_next_online_cpu(struct ehca_comp_pool *pool)
-{
- int cpu;
- unsigned long flags;
-
- WARN_ON_ONCE(!in_interrupt());
- if (ehca_debug_level >= 3)
- ehca_dmp(cpu_online_mask, cpumask_size(), "");
-
- spin_lock_irqsave(&pool->last_cpu_lock, flags);
- do {
- cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_online_mask);
- pool->last_cpu = cpu;
- } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
- spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
-
- return cpu;
-}
-
-static void __queue_comp_task(struct ehca_cq *__cq,
- struct ehca_cpu_comp_task *cct,
- struct task_struct *thread)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cct->task_lock, flags);
- spin_lock(&__cq->task_lock);
-
- if (__cq->nr_callbacks == 0) {
- __cq->nr_callbacks++;
- list_add_tail(&__cq->entry, &cct->cq_list);
- cct->cq_jobs++;
- wake_up_process(thread);
- } else
- __cq->nr_callbacks++;
-
- spin_unlock(&__cq->task_lock);
- spin_unlock_irqrestore(&cct->task_lock, flags);
-}
-
-static void queue_comp_task(struct ehca_cq *__cq)
-{
- int cpu_id;
- struct ehca_cpu_comp_task *cct;
- struct task_struct *thread;
- int cq_jobs;
- unsigned long flags;
-
- cpu_id = find_next_online_cpu(pool);
- BUG_ON(!cpu_online(cpu_id));
-
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
- thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
- BUG_ON(!cct || !thread);
-
- spin_lock_irqsave(&cct->task_lock, flags);
- cq_jobs = cct->cq_jobs;
- spin_unlock_irqrestore(&cct->task_lock, flags);
- if (cq_jobs > 0) {
- cpu_id = find_next_online_cpu(pool);
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
- thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
- BUG_ON(!cct || !thread);
- }
- __queue_comp_task(__cq, cct, thread);
-}
-
-static void run_comp_task(struct ehca_cpu_comp_task *cct)
-{
- struct ehca_cq *cq;
-
- while (!list_empty(&cct->cq_list)) {
- cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
- spin_unlock_irq(&cct->task_lock);
-
- comp_event_callback(cq);
- if (atomic_dec_and_test(&cq->nr_events))
- wake_up(&cq->wait_completion);
-
- spin_lock_irq(&cct->task_lock);
- spin_lock(&cq->task_lock);
- cq->nr_callbacks--;
- if (!cq->nr_callbacks) {
- list_del_init(cct->cq_list.next);
- cct->cq_jobs--;
- }
- spin_unlock(&cq->task_lock);
- }
-}
-
-static void comp_task_park(unsigned int cpu)
-{
- struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- struct ehca_cpu_comp_task *target;
- struct task_struct *thread;
- struct ehca_cq *cq, *tmp;
- LIST_HEAD(list);
-
- spin_lock_irq(&cct->task_lock);
- cct->cq_jobs = 0;
- cct->active = 0;
- list_splice_init(&cct->cq_list, &list);
- spin_unlock_irq(&cct->task_lock);
-
- cpu = find_next_online_cpu(pool);
- target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
- spin_lock_irq(&target->task_lock);
- list_for_each_entry_safe(cq, tmp, &list, entry) {
- list_del(&cq->entry);
- __queue_comp_task(cq, target, thread);
- }
- spin_unlock_irq(&target->task_lock);
-}
-
-static void comp_task_stop(unsigned int cpu, bool online)
-{
- struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
- spin_lock_irq(&cct->task_lock);
- cct->cq_jobs = 0;
- cct->active = 0;
- WARN_ON(!list_empty(&cct->cq_list));
- spin_unlock_irq(&cct->task_lock);
-}
-
-static int comp_task_should_run(unsigned int cpu)
-{
- struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
- return cct->cq_jobs;
-}
-
-static void comp_task(unsigned int cpu)
-{
- struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
- int cql_empty;
-
- spin_lock_irq(&cct->task_lock);
- cql_empty = list_empty(&cct->cq_list);
- if (!cql_empty) {
- __set_current_state(TASK_RUNNING);
- run_comp_task(cct);
- }
- spin_unlock_irq(&cct->task_lock);
-}
-
-static struct smp_hotplug_thread comp_pool_threads = {
- .thread_should_run = comp_task_should_run,
- .thread_fn = comp_task,
- .thread_comm = "ehca_comp/%u",
- .cleanup = comp_task_stop,
- .park = comp_task_park,
-};
-
-int ehca_create_comp_pool(void)
-{
- int cpu, ret = -ENOMEM;
-
- if (!ehca_scaling_code)
- return 0;
-
- pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
- if (pool == NULL)
- return -ENOMEM;
-
- spin_lock_init(&pool->last_cpu_lock);
- pool->last_cpu = cpumask_any(cpu_online_mask);
-
- pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
- if (!pool->cpu_comp_tasks)
- goto out_pool;
-
- pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
- if (!pool->cpu_comp_threads)
- goto out_tasks;
-
- for_each_present_cpu(cpu) {
- struct ehca_cpu_comp_task *cct;
-
- cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
- spin_lock_init(&cct->task_lock);
- INIT_LIST_HEAD(&cct->cq_list);
- }
-
- comp_pool_threads.store = pool->cpu_comp_threads;
- ret = smpboot_register_percpu_thread(&comp_pool_threads);
- if (ret)
- goto out_threads;
-
- pr_info("eHCA scaling code enabled\n");
- return ret;
-
-out_threads:
- free_percpu(pool->cpu_comp_threads);
-out_tasks:
- free_percpu(pool->cpu_comp_tasks);
-out_pool:
- kfree(pool);
- return ret;
-}
-
-void ehca_destroy_comp_pool(void)
-{
- if (!ehca_scaling_code)
- return;
-
- smpboot_unregister_percpu_thread(&comp_pool_threads);
-
- free_percpu(pool->cpu_comp_threads);
- free_percpu(pool->cpu_comp_tasks);
- kfree(pool);
-}
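
The EQE_*/NEQE_* masks above are read with the driver's EHCA_BMASK_GET() helpers, which address fields in MSB-first (IBM) bit numbering on a 64-bit entry. A minimal, standalone sketch of that style of field extraction follows; the macros below are written for this example as an approximation and are not the driver's originals.

/* illustrative userspace sketch, not part of the removed file */
#include <stdint.h>
#include <stdio.h>

/* field spanning IBM bits from..to, where bit 0 is the MSB of 64 */
#define FIELD_MASK(from, to)   (~UINT64_C(0) >> (63 - ((to) - (from))))
#define FIELD_GET(from, to, v) (((v) >> (63 - (to))) & FIELD_MASK(from, to))

/* token lives in IBM bits 32..63, i.e. the low 32 bits of the entry */
#define TOKEN_FROM 32
#define TOKEN_TO   63

int main(void)
{
	uint64_t eqe = 0x8000000000001234ULL;   /* entry with token 0x1234 */

	printf("token=%#llx\n",
	       (unsigned long long)FIELD_GET(TOKEN_FROM, TOKEN_TO, eqe));
	return 0;
}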
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644
index 5370199f08c7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_irq.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Function definitions and structs for EQs, NEQs and interrupts
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Khadija Souissi <souissi@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IRQ_H
-#define __EHCA_IRQ_H
-
-
-struct ehca_shca;
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
-void ehca_tasklet_neq(unsigned long data);
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
-void ehca_tasklet_eq(unsigned long data);
-void ehca_process_eq(struct ehca_shca *shca, int is_irq);
-
-struct ehca_cpu_comp_task {
- struct list_head cq_list;
- spinlock_t task_lock;
- int cq_jobs;
- int active;
-};
-
-struct ehca_comp_pool {
- struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
- struct task_struct * __percpu *cpu_comp_threads;
- int last_cpu;
- spinlock_t last_cpu_lock;
-};
-
-int ehca_create_comp_pool(void);
-void ehca_destroy_comp_pool(void);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644
index cca5933fcda6..000000000000
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Function definitions for internal functions
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Dietmar Decker <ddecker@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IVERBS_H__
-#define __EHCA_IVERBS_H__
-
-#include "ehca_classes.h"
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw);
-
-int ehca_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props);
-
-enum rdma_protocol_type
-ehca_query_protocol(struct ib_device *device, u8 port_num);
-
-int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
- struct ehca_sma_attr *attr);
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid);
-
-int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
- struct ib_port_modify *props);
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int ehca_dealloc_pd(struct ib_pd *pd);
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_destroy_ah(struct ib_ah *ah);
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int mr_access_flags,
- struct ib_udata *udata);
-
-int ehca_dereg_mr(struct ib_mr *mr);
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-
-int ehca_dealloc_mw(struct ib_mw *mw);
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list, int list_len, u64 iova);
-
-int ehca_unmap_fmr(struct list_head *fmr_list);
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr);
-
-enum ehca_eq_type {
- EHCA_EQ = 0, /* Event Queue */
- EHCA_NEQ /* Notification Event Queue */
-};
-
-int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
- enum ehca_eq_type type, const u32 length);
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int ehca_destroy_cq(struct ib_cq *cq);
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
-
-int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-
-int ehca_destroy_qp(struct ib_qp *qp);
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- struct ib_udata *udata);
-
-int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr);
-
-int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
-
-int ehca_post_srq_recv(struct ib_srq *srq,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata);
-
-int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-
-int ehca_destroy_srq(struct ib_srq *srq);
-
-u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
- struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
- struct ib_udata *udata);
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context);
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
-
-void ehca_poll_eqs(unsigned long data);
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
- enum ib_rate path_rate, u32 *ipd);
-
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
-
-#ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(gfp_t flags);
-void ehca_free_fw_ctrlblock(void *ptr);
-#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
-#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
-#endif
-
-void ehca_recover_sqp(struct ib_qp *sqp);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644
index 832f22f40862..000000000000
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ /dev/null
@@ -1,1118 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * module start stop, hca detection
- *
- * Authors: Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <linux/slab.h>
-#endif
-
-#include <linux/notifier.h>
-#include <linux/memory.h>
-#include <rdma/ib_mad.h>
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-#define HCAD_VERSION "0029"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
-MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION(HCAD_VERSION);
-
-static bool ehca_open_aqp1 = 0;
-static int ehca_hw_level = 0;
-static bool ehca_poll_all_eqs = 1;
-
-int ehca_debug_level = 0;
-int ehca_nr_ports = -1;
-bool ehca_use_hp_mr = 0;
-int ehca_port_act_time = 30;
-int ehca_static_rate = -1;
-bool ehca_scaling_code = 0;
-int ehca_lock_hcalls = -1;
-int ehca_max_cq = -1;
-int ehca_max_qp = -1;
-
-module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
-module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
-module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
-module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
-module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
-module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
-module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
-module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
-module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
-module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
-
-MODULE_PARM_DESC(open_aqp1,
- "Open AQP1 on startup (default: no)");
-MODULE_PARM_DESC(debug_level,
- "Amount of debug output (0: none (default), 1: traces, "
- "2: some dumps, 3: lots)");
-MODULE_PARM_DESC(hw_level,
- "Hardware level (0: autosensing (default), "
- "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
-MODULE_PARM_DESC(nr_ports,
- "number of connected ports (-1: autodetect (default), "
- "1: port one only, 2: two ports)");
-MODULE_PARM_DESC(use_hp_mr,
- "Use high performance MRs (default: no)");
-MODULE_PARM_DESC(port_act_time,
- "Time to wait for port activation (default: 30 sec)");
-MODULE_PARM_DESC(poll_all_eqs,
- "Poll all event queues periodically (default: yes)");
-MODULE_PARM_DESC(static_rate,
- "Set permanent static rate (default: no static rate)");
-MODULE_PARM_DESC(scaling_code,
- "Enable scaling code (default: no)");
-MODULE_PARM_DESC(lock_hcalls,
- "Serialize all hCalls made by the driver "
- "(default: autodetect)");
-MODULE_PARM_DESC(number_of_cqs,
- "Max number of CQs which can be allocated "
- "(default: autodetect)");
-MODULE_PARM_DESC(number_of_qps,
- "Max number of QPs which can be allocated "
- "(default: autodetect)");
-
-DEFINE_RWLOCK(ehca_qp_idr_lock);
-DEFINE_RWLOCK(ehca_cq_idr_lock);
-DEFINE_IDR(ehca_qp_idr);
-DEFINE_IDR(ehca_cq_idr);
-
-static LIST_HEAD(shca_list); /* list of all registered ehcas */
-DEFINE_SPINLOCK(shca_list_lock);
-
-static struct timer_list poll_eqs_timer;
-
-#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache;
-
-void *ehca_alloc_fw_ctrlblock(gfp_t flags)
-{
- void *ret = kmem_cache_zalloc(ctblk_cache, flags);
- if (!ret)
- ehca_gen_err("Out of memory for ctblk");
- return ret;
-}
-
-void ehca_free_fw_ctrlblock(void *ptr)
-{
- if (ptr)
- kmem_cache_free(ctblk_cache, ptr);
-
-}
-#endif
-
-int ehca2ib_return_code(u64 ehca_rc)
-{
- switch (ehca_rc) {
- case H_SUCCESS:
- return 0;
- case H_RESOURCE: /* Resource in use */
- case H_BUSY:
- return -EBUSY;
- case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
- case H_CONSTRAINED: /* resource constraint */
- case H_NO_MEM:
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int ehca_create_slab_caches(void)
-{
- int ret;
-
- ret = ehca_init_pd_cache();
- if (ret) {
- ehca_gen_err("Cannot create PD SLAB cache.");
- return ret;
- }
-
- ret = ehca_init_cq_cache();
- if (ret) {
- ehca_gen_err("Cannot create CQ SLAB cache.");
- goto create_slab_caches2;
- }
-
- ret = ehca_init_qp_cache();
- if (ret) {
- ehca_gen_err("Cannot create QP SLAB cache.");
- goto create_slab_caches3;
- }
-
- ret = ehca_init_av_cache();
- if (ret) {
- ehca_gen_err("Cannot create AV SLAB cache.");
- goto create_slab_caches4;
- }
-
- ret = ehca_init_mrmw_cache();
- if (ret) {
- ehca_gen_err("Cannot create MR&MW SLAB cache.");
- goto create_slab_caches5;
- }
-
- ret = ehca_init_small_qp_cache();
- if (ret) {
- ehca_gen_err("Cannot create small queue SLAB cache.");
- goto create_slab_caches6;
- }
-
-#ifdef CONFIG_PPC_64K_PAGES
- ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
- EHCA_PAGESIZE, H_CB_ALIGNMENT,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!ctblk_cache) {
- ehca_gen_err("Cannot create ctblk SLAB cache.");
- ehca_cleanup_small_qp_cache();
- ret = -ENOMEM;
- goto create_slab_caches6;
- }
-#endif
- return 0;
-
-create_slab_caches6:
- ehca_cleanup_mrmw_cache();
-
-create_slab_caches5:
- ehca_cleanup_av_cache();
-
-create_slab_caches4:
- ehca_cleanup_qp_cache();
-
-create_slab_caches3:
- ehca_cleanup_cq_cache();
-
-create_slab_caches2:
- ehca_cleanup_pd_cache();
-
- return ret;
-}
-
-static void ehca_destroy_slab_caches(void)
-{
- ehca_cleanup_small_qp_cache();
- ehca_cleanup_mrmw_cache();
- ehca_cleanup_av_cache();
- ehca_cleanup_qp_cache();
- ehca_cleanup_cq_cache();
- ehca_cleanup_pd_cache();
-#ifdef CONFIG_PPC_64K_PAGES
- kmem_cache_destroy(ctblk_cache);
-#endif
-}
-
-#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
-#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
-
-static struct cap_descr {
- u64 mask;
- char *descr;
-} hca_cap_descr[] = {
- { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
- { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
- { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
- { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
- { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
- { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
- { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
- { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
- { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
- { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
- { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
- { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
- { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
- { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
- { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
- { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
- { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
- { HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
-};
-
-static int ehca_sense_attributes(struct ehca_shca *shca)
-{
- int i, ret = 0;
- u64 h_ret;
- struct hipz_query_hca *rblock;
- struct hipz_query_port *port;
- const char *loc_code;
-
- static const u32 pgsize_map[] = {
- HCA_CAP_MR_PGSIZE_4K, 0x1000,
- HCA_CAP_MR_PGSIZE_64K, 0x10000,
- HCA_CAP_MR_PGSIZE_1M, 0x100000,
- HCA_CAP_MR_PGSIZE_16M, 0x1000000,
- };
-
- ehca_gen_dbg("Probing adapter %s...",
- shca->ofdev->dev.of_node->full_name);
- loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
- NULL);
- if (loc_code)
-		ehca_gen_dbg(" ... location code=%s", loc_code);
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_gen_err("Cannot allocate rblock memory.");
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
- if (h_ret != H_SUCCESS) {
- ehca_gen_err("Cannot query device properties. h_ret=%lli",
- h_ret);
- ret = -EPERM;
- goto sense_attributes1;
- }
-
- if (ehca_nr_ports == 1)
- shca->num_ports = 1;
- else
- shca->num_ports = (u8)rblock->num_ports;
-
- ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
-
- if (ehca_hw_level == 0) {
- u32 hcaaver;
- u32 revid;
-
- hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
- revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
-
- ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
-
- if (hcaaver == 1) {
- if (revid <= 3)
- shca->hw_level = 0x10 | (revid + 1);
- else
- shca->hw_level = 0x14;
- } else if (hcaaver == 2) {
- if (revid == 0)
- shca->hw_level = 0x21;
- else if (revid == 0x10)
- shca->hw_level = 0x22;
- else if (revid == 0x20 || revid == 0x21)
- shca->hw_level = 0x23;
- }
-
- if (!shca->hw_level) {
- ehca_gen_warn("unknown hardware version"
- " - assuming default level");
- shca->hw_level = 0x22;
- }
- } else
- shca->hw_level = ehca_hw_level;
- ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
-
- shca->hca_cap = rblock->hca_cap_indicators;
- ehca_gen_dbg(" ... HCA capabilities:");
- for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
- if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
- ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
-
- /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
- * a firmware property, so it's valid across all adapters
- */
- if (ehca_lock_hcalls == -1)
- ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
- shca->hca_cap);
-
- /* translate supported MR page sizes; always support 4K */
- shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
- for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
- if (rblock->memory_page_size_supported & pgsize_map[i])
- shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-
- /* Set maximum number of CQs and QPs to calculate EQ size */
- if (shca->max_num_qps == -1)
- shca->max_num_qps = min_t(int, rblock->max_qp,
- EHCA_MAX_NUM_QUEUES);
- else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
- ehca_gen_warn("The requested number of QPs is out of range "
- "(1 - %i) specified by HW. Value is set to %i",
- rblock->max_qp, rblock->max_qp);
- shca->max_num_qps = rblock->max_qp;
- }
-
- if (shca->max_num_cqs == -1)
- shca->max_num_cqs = min_t(int, rblock->max_cq,
- EHCA_MAX_NUM_QUEUES);
- else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
- ehca_gen_warn("The requested number of CQs is out of range "
- "(1 - %i) specified by HW. Value is set to %i",
- rblock->max_cq, rblock->max_cq);
-		shca->max_num_cqs = rblock->max_cq;
-	}
-
- /* query max MTU from first port -- it's the same for all ports */
- port = (struct hipz_query_port *)rblock;
- h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
- if (h_ret != H_SUCCESS) {
- ehca_gen_err("Cannot query port properties. h_ret=%lli",
- h_ret);
- ret = -EPERM;
- goto sense_attributes1;
- }
-
- shca->max_mtu = port->max_mtu;
-
-sense_attributes1:
- ehca_free_fw_ctrlblock(rblock);
- return ret;
-}
-
-static int init_node_guid(struct ehca_shca *shca)
-{
- int ret = 0;
- struct hipz_query_hca *rblock;
-
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!rblock) {
- ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
- return -ENOMEM;
- }
-
- if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Can't query device properties");
- ret = -EINVAL;
- goto init_node_guid1;
- }
-
- memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
-
-init_node_guid1:
- ehca_free_fw_ctrlblock(rblock);
- return ret;
-}
-
-static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = ehca_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-}
-
-static int ehca_init_device(struct ehca_shca *shca)
-{
- int ret;
-
- ret = init_node_guid(shca);
- if (ret)
- return ret;
-
- strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
- shca->ib_device.owner = THIS_MODULE;
-
- shca->ib_device.uverbs_abi_ver = 8;
- shca->ib_device.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
-
- shca->ib_device.node_type = RDMA_NODE_IB_CA;
- shca->ib_device.phys_port_cnt = shca->num_ports;
- shca->ib_device.num_comp_vectors = 1;
- shca->ib_device.dma_device = &shca->ofdev->dev;
- shca->ib_device.query_device = ehca_query_device;
- shca->ib_device.query_port = ehca_query_port;
- shca->ib_device.query_gid = ehca_query_gid;
- shca->ib_device.query_pkey = ehca_query_pkey;
-	/* shca->ib_device.modify_device = ehca_modify_device */
- shca->ib_device.modify_port = ehca_modify_port;
- shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
- shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
- shca->ib_device.alloc_pd = ehca_alloc_pd;
- shca->ib_device.dealloc_pd = ehca_dealloc_pd;
- shca->ib_device.create_ah = ehca_create_ah;
- /* shca->ib_device.modify_ah = ehca_modify_ah; */
- shca->ib_device.query_ah = ehca_query_ah;
- shca->ib_device.destroy_ah = ehca_destroy_ah;
- shca->ib_device.create_qp = ehca_create_qp;
- shca->ib_device.modify_qp = ehca_modify_qp;
- shca->ib_device.query_qp = ehca_query_qp;
- shca->ib_device.destroy_qp = ehca_destroy_qp;
- shca->ib_device.post_send = ehca_post_send;
- shca->ib_device.post_recv = ehca_post_recv;
- shca->ib_device.create_cq = ehca_create_cq;
- shca->ib_device.destroy_cq = ehca_destroy_cq;
- shca->ib_device.resize_cq = ehca_resize_cq;
- shca->ib_device.poll_cq = ehca_poll_cq;
- /* shca->ib_device.peek_cq = ehca_peek_cq; */
- shca->ib_device.req_notify_cq = ehca_req_notify_cq;
- /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
- shca->ib_device.get_dma_mr = ehca_get_dma_mr;
- shca->ib_device.reg_user_mr = ehca_reg_user_mr;
- shca->ib_device.dereg_mr = ehca_dereg_mr;
- shca->ib_device.alloc_mw = ehca_alloc_mw;
- shca->ib_device.dealloc_mw = ehca_dealloc_mw;
- shca->ib_device.alloc_fmr = ehca_alloc_fmr;
- shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
- shca->ib_device.unmap_fmr = ehca_unmap_fmr;
- shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
- shca->ib_device.attach_mcast = ehca_attach_mcast;
- shca->ib_device.detach_mcast = ehca_detach_mcast;
- shca->ib_device.process_mad = ehca_process_mad;
- shca->ib_device.mmap = ehca_mmap;
- shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
- shca->ib_device.get_port_immutable = ehca_port_immutable;
-
- if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
- shca->ib_device.uverbs_cmd_mask |=
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
- shca->ib_device.create_srq = ehca_create_srq;
- shca->ib_device.modify_srq = ehca_modify_srq;
- shca->ib_device.query_srq = ehca_query_srq;
- shca->ib_device.destroy_srq = ehca_destroy_srq;
- shca->ib_device.post_srq_recv = ehca_post_srq_recv;
- }
-
- return ret;
-}
-
-static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
-{
- struct ehca_sport *sport = &shca->sport[port - 1];
- struct ib_cq *ibcq;
- struct ib_qp *ibqp;
- struct ib_qp_init_attr qp_init_attr;
- struct ib_cq_init_attr cq_attr = {};
- int ret;
-
- if (sport->ibcq_aqp1) {
- ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
- return -EPERM;
- }
-
- cq_attr.cqe = 10;
- ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
- &cq_attr);
- if (IS_ERR(ibcq)) {
- ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
- return PTR_ERR(ibcq);
- }
- sport->ibcq_aqp1 = ibcq;
-
- if (sport->ibqp_sqp[IB_QPT_GSI]) {
- ehca_err(&shca->ib_device, "AQP1 QP is already created.");
- ret = -EPERM;
- goto create_aqp1;
- }
-
- memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
- qp_init_attr.send_cq = ibcq;
- qp_init_attr.recv_cq = ibcq;
- qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.cap.max_send_wr = 100;
- qp_init_attr.cap.max_recv_wr = 100;
- qp_init_attr.cap.max_send_sge = 2;
- qp_init_attr.cap.max_recv_sge = 1;
- qp_init_attr.qp_type = IB_QPT_GSI;
- qp_init_attr.port_num = port;
- qp_init_attr.qp_context = NULL;
- qp_init_attr.event_handler = NULL;
- qp_init_attr.srq = NULL;
-
- ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
- if (IS_ERR(ibqp)) {
- ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
- ret = PTR_ERR(ibqp);
- goto create_aqp1;
- }
- sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
-
- return 0;
-
-create_aqp1:
- ib_destroy_cq(sport->ibcq_aqp1);
- return ret;
-}
-
-static int ehca_destroy_aqp1(struct ehca_sport *sport)
-{
- int ret;
-
- ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
- if (ret) {
- ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
- return ret;
- }
-
- ret = ib_destroy_cq(sport->ibcq_aqp1);
- if (ret)
- ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
-
- return ret;
-}
-
-static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
-}
-
-static ssize_t ehca_store_debug_level(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- int value = (*buf) - '0';
- if (value >= 0 && value <= 9)
- ehca_debug_level = value;
- return 1;
-}
-
-static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
- ehca_show_debug_level, ehca_store_debug_level);
-
-static struct attribute *ehca_drv_attrs[] = {
- &driver_attr_debug_level.attr,
- NULL
-};
-
-static struct attribute_group ehca_drv_attr_grp = {
- .attrs = ehca_drv_attrs
-};
-
-static const struct attribute_group *ehca_drv_attr_groups[] = {
- &ehca_drv_attr_grp,
- NULL,
-};
-
-#define EHCA_RESOURCE_ATTR(name) \
-static ssize_t ehca_show_##name(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct ehca_shca *shca; \
- struct hipz_query_hca *rblock; \
- int data; \
- \
- shca = dev_get_drvdata(dev); \
- \
- rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
- if (!rblock) { \
- dev_err(dev, "Can't allocate rblock memory.\n"); \
- return 0; \
- } \
- \
- if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
- dev_err(dev, "Can't query device properties\n"); \
- ehca_free_fw_ctrlblock(rblock); \
- return 0; \
- } \
- \
- data = rblock->name; \
- ehca_free_fw_ctrlblock(rblock); \
- \
- if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
- return snprintf(buf, 256, "1\n"); \
- else \
- return snprintf(buf, 256, "%d\n", data); \
- \
-} \
-static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
-
-EHCA_RESOURCE_ATTR(num_ports);
-EHCA_RESOURCE_ATTR(hw_ver);
-EHCA_RESOURCE_ATTR(max_eq);
-EHCA_RESOURCE_ATTR(cur_eq);
-EHCA_RESOURCE_ATTR(max_cq);
-EHCA_RESOURCE_ATTR(cur_cq);
-EHCA_RESOURCE_ATTR(max_qp);
-EHCA_RESOURCE_ATTR(cur_qp);
-EHCA_RESOURCE_ATTR(max_mr);
-EHCA_RESOURCE_ATTR(cur_mr);
-EHCA_RESOURCE_ATTR(max_mw);
-EHCA_RESOURCE_ATTR(cur_mw);
-EHCA_RESOURCE_ATTR(max_pd);
-EHCA_RESOURCE_ATTR(max_ah);
-
-static ssize_t ehca_show_adapter_handle(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ehca_shca *shca = dev_get_drvdata(dev);
-
- return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
-
-}
-static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-
-static struct attribute *ehca_dev_attrs[] = {
- &dev_attr_adapter_handle.attr,
- &dev_attr_num_ports.attr,
- &dev_attr_hw_ver.attr,
- &dev_attr_max_eq.attr,
- &dev_attr_cur_eq.attr,
- &dev_attr_max_cq.attr,
- &dev_attr_cur_cq.attr,
- &dev_attr_max_qp.attr,
- &dev_attr_cur_qp.attr,
- &dev_attr_max_mr.attr,
- &dev_attr_cur_mr.attr,
- &dev_attr_max_mw.attr,
- &dev_attr_cur_mw.attr,
- &dev_attr_max_pd.attr,
- &dev_attr_max_ah.attr,
- NULL
-};
-
-static struct attribute_group ehca_dev_attr_grp = {
- .attrs = ehca_dev_attrs
-};
-
-static int ehca_probe(struct platform_device *dev)
-{
- struct ehca_shca *shca;
- const u64 *handle;
- struct ib_pd *ibpd;
- int ret, i, eq_size;
- unsigned long flags;
-
- handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
- if (!handle) {
- ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
- dev->dev.of_node->full_name);
- return -ENODEV;
- }
-
- if (!(*handle)) {
- ehca_gen_err("Wrong eHCA handle for adapter: %s.",
- dev->dev.of_node->full_name);
- return -ENODEV;
- }
-
- shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
- if (!shca) {
- ehca_gen_err("Cannot allocate shca memory.");
- return -ENOMEM;
- }
-
- mutex_init(&shca->modify_mutex);
- atomic_set(&shca->num_cqs, 0);
- atomic_set(&shca->num_qps, 0);
- shca->max_num_qps = ehca_max_qp;
- shca->max_num_cqs = ehca_max_cq;
-
- for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
- spin_lock_init(&shca->sport[i].mod_sqp_lock);
-
- shca->ofdev = dev;
- shca->ipz_hca_handle.handle = *handle;
- dev_set_drvdata(&dev->dev, shca);
-
- ret = ehca_sense_attributes(shca);
- if (ret < 0) {
- ehca_gen_err("Cannot sense eHCA attributes.");
- goto probe1;
- }
-
- ret = ehca_init_device(shca);
- if (ret) {
- ehca_gen_err("Cannot init ehca device struct");
- goto probe1;
- }
-
- eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
- /* create event queues */
- ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
- if (ret) {
- ehca_err(&shca->ib_device, "Cannot create EQ.");
- goto probe1;
- }
-
- ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
- if (ret) {
- ehca_err(&shca->ib_device, "Cannot create NEQ.");
- goto probe3;
- }
-
- /* create internal protection domain */
- ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
- if (IS_ERR(ibpd)) {
- ehca_err(&shca->ib_device, "Cannot create internal PD.");
- ret = PTR_ERR(ibpd);
- goto probe4;
- }
-
- shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
- shca->pd->ib_pd.device = &shca->ib_device;
-
- /* create internal max MR */
- ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
-
- if (ret) {
- ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
- ret);
- goto probe5;
- }
-
- ret = ib_register_device(&shca->ib_device, NULL);
- if (ret) {
- ehca_err(&shca->ib_device,
- "ib_register_device() failed ret=%i", ret);
- goto probe6;
- }
-
- /* create AQP1 for port 1 */
- if (ehca_open_aqp1 == 1) {
- shca->sport[0].port_state = IB_PORT_DOWN;
- ret = ehca_create_aqp1(shca, 1);
- if (ret) {
- ehca_err(&shca->ib_device,
- "Cannot create AQP1 for port 1.");
- goto probe7;
- }
- }
-
- /* create AQP1 for port 2 */
- if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
- shca->sport[1].port_state = IB_PORT_DOWN;
- ret = ehca_create_aqp1(shca, 2);
- if (ret) {
- ehca_err(&shca->ib_device,
- "Cannot create AQP1 for port 2.");
- goto probe8;
- }
- }
-
- ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
- if (ret) /* only complain; we can live without attributes */
- ehca_err(&shca->ib_device,
- "Cannot create device attributes ret=%d", ret);
-
- spin_lock_irqsave(&shca_list_lock, flags);
- list_add(&shca->shca_list, &shca_list);
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- return 0;
-
-probe8:
- ret = ehca_destroy_aqp1(&shca->sport[0]);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy AQP1 for port 1. ret=%i", ret);
-
-probe7:
- ib_unregister_device(&shca->ib_device);
-
-probe6:
- ret = ehca_dereg_internal_maxmr(shca);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal MR. ret=%x", ret);
-
-probe5:
- ret = ehca_dealloc_pd(&shca->pd->ib_pd);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal PD. ret=%x", ret);
-
-probe4:
- ret = ehca_destroy_eq(shca, &shca->neq);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy NEQ. ret=%x", ret);
-
-probe3:
- ret = ehca_destroy_eq(shca, &shca->eq);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy EQ. ret=%x", ret);
-
-probe1:
- ib_dealloc_device(&shca->ib_device);
-
- return -EINVAL;
-}
-
-static int ehca_remove(struct platform_device *dev)
-{
- struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
- unsigned long flags;
- int ret;
-
- sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-
- if (ehca_open_aqp1 == 1) {
- int i;
- for (i = 0; i < shca->num_ports; i++) {
- ret = ehca_destroy_aqp1(&shca->sport[i]);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy AQP1 for port %x "
-					 "ret=%i", i, ret);
- }
- }
-
- ib_unregister_device(&shca->ib_device);
-
- ret = ehca_dereg_internal_maxmr(shca);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal MR. ret=%i", ret);
-
- ret = ehca_dealloc_pd(&shca->pd->ib_pd);
- if (ret)
- ehca_err(&shca->ib_device,
- "Cannot destroy internal PD. ret=%i", ret);
-
- ret = ehca_destroy_eq(shca, &shca->eq);
- if (ret)
- ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
-
- ret = ehca_destroy_eq(shca, &shca->neq);
- if (ret)
-		ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%i", ret);
-
- ib_dealloc_device(&shca->ib_device);
-
- spin_lock_irqsave(&shca_list_lock, flags);
- list_del(&shca->shca_list);
- spin_unlock_irqrestore(&shca_list_lock, flags);
-
- return ret;
-}
-
-static struct of_device_id ehca_device_table[] =
-{
- {
- .name = "lhca",
- .compatible = "IBM,lhca",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, ehca_device_table);
-
-static struct platform_driver ehca_driver = {
- .probe = ehca_probe,
- .remove = ehca_remove,
- .driver = {
- .name = "ehca",
- .owner = THIS_MODULE,
- .groups = ehca_drv_attr_groups,
- .of_match_table = ehca_device_table,
- },
-};
-
-void ehca_poll_eqs(unsigned long data)
-{
- struct ehca_shca *shca;
-
- spin_lock(&shca_list_lock);
- list_for_each_entry(shca, &shca_list, shca_list) {
- if (shca->eq.is_initialized) {
- /* call deadman proc only if eq ptr does not change */
- struct ehca_eq *eq = &shca->eq;
- int max = 3;
- volatile u64 q_ofs, q_ofs2;
- unsigned long flags;
- spin_lock_irqsave(&eq->spinlock, flags);
- q_ofs = eq->ipz_queue.current_q_offset;
- spin_unlock_irqrestore(&eq->spinlock, flags);
- do {
- spin_lock_irqsave(&eq->spinlock, flags);
- q_ofs2 = eq->ipz_queue.current_q_offset;
- spin_unlock_irqrestore(&eq->spinlock, flags);
- max--;
- } while (q_ofs == q_ofs2 && max > 0);
- if (q_ofs == q_ofs2)
- ehca_process_eq(shca, 0);
- }
- }
- mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
- spin_unlock(&shca_list_lock);
-}
-
-static int ehca_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- static unsigned long ehca_dmem_warn_time;
- unsigned long flags;
-
- switch (action) {
- case MEM_CANCEL_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_ONLINE:
- case MEM_OFFLINE:
- return NOTIFY_OK;
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- /* only ok if no hca is attached to the lpar */
- spin_lock_irqsave(&shca_list_lock, flags);
- if (list_empty(&shca_list)) {
- spin_unlock_irqrestore(&shca_list_lock, flags);
- return NOTIFY_OK;
- } else {
- spin_unlock_irqrestore(&shca_list_lock, flags);
- if (printk_timed_ratelimit(&ehca_dmem_warn_time,
- 30 * 1000))
-				ehca_gen_err("DMEM operations are not allowed "
-					     "in conjunction with eHCA");
- return NOTIFY_BAD;
- }
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block ehca_mem_nb = {
- .notifier_call = ehca_mem_notifier,
-};
-
-static int __init ehca_module_init(void)
-{
- int ret;
-
- printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Version " HCAD_VERSION ")\n");
-
- ret = ehca_create_comp_pool();
- if (ret) {
- ehca_gen_err("Cannot create comp pool.");
- return ret;
- }
-
- ret = ehca_create_slab_caches();
- if (ret) {
- ehca_gen_err("Cannot create SLAB caches");
- ret = -ENOMEM;
- goto module_init1;
- }
-
- ret = ehca_create_busmap();
- if (ret) {
- ehca_gen_err("Cannot create busmap.");
- goto module_init2;
- }
-
- ret = ibmebus_register_driver(&ehca_driver);
- if (ret) {
- ehca_gen_err("Cannot register eHCA device driver");
- ret = -EINVAL;
- goto module_init3;
- }
-
- ret = register_memory_notifier(&ehca_mem_nb);
- if (ret) {
- ehca_gen_err("Failed registering memory add/remove notifier");
- goto module_init4;
- }
-
- if (ehca_poll_all_eqs != 1) {
- ehca_gen_err("WARNING!!!");
- ehca_gen_err("It is possible to lose interrupts.");
- } else {
- init_timer(&poll_eqs_timer);
- poll_eqs_timer.function = ehca_poll_eqs;
- poll_eqs_timer.expires = jiffies + HZ;
- add_timer(&poll_eqs_timer);
- }
-
- return 0;
-
-module_init4:
- ibmebus_unregister_driver(&ehca_driver);
-
-module_init3:
- ehca_destroy_busmap();
-
-module_init2:
- ehca_destroy_slab_caches();
-
-module_init1:
- ehca_destroy_comp_pool();
- return ret;
-};
-
-static void __exit ehca_module_exit(void)
-{
- if (ehca_poll_all_eqs == 1)
- del_timer_sync(&poll_eqs_timer);
-
- ibmebus_unregister_driver(&ehca_driver);
-
- unregister_memory_notifier(&ehca_mem_nb);
-
- ehca_destroy_busmap();
-
- ehca_destroy_slab_caches();
-
- ehca_destroy_comp_pool();
-
- idr_destroy(&ehca_cq_idr);
- idr_destroy(&ehca_qp_idr);
-};
-
-module_init(ehca_module_init);
-module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644
index cec181532924..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mcast.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * mcast functions
- *
- * Authors: Khadija Souissi <souissik@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define MAX_MC_LID 0xFFFE
-#define MIN_MC_LID 0xC000 /* Multicast limits */
-#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid) \
- (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
-
-int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
- ib_device);
- union ib_gid my_gid;
- u64 subnet_prefix, interface_id, h_ret;
-
- if (ibqp->qp_type != IB_QPT_UD) {
- ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
- return -EINVAL;
- }
-
- if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		ehca_err(ibqp->device, "invalid multicast gid");
- return -EINVAL;
- } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
- return -EINVAL;
- }
-
- memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
- subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
- interface_id = be64_to_cpu(my_gid.global.interface_id);
- h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- my_qp->galpas.kernel,
- lid, subnet_prefix, interface_id);
- if (h_ret != H_SUCCESS)
- ehca_err(ibqp->device,
- "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
- "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
- return ehca2ib_return_code(h_ret);
-}
-
-int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca = container_of(ibqp->pd->device,
- struct ehca_shca, ib_device);
- union ib_gid my_gid;
- u64 subnet_prefix, interface_id, h_ret;
-
- if (ibqp->qp_type != IB_QPT_UD) {
- ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
- return -EINVAL;
- }
-
- if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		ehca_err(ibqp->device, "invalid multicast gid");
- return -EINVAL;
- } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
- return -EINVAL;
- }
-
- memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
- subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
- interface_id = be64_to_cpu(my_gid.global.interface_id);
- h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- my_qp->galpas.kernel,
- lid, subnet_prefix, interface_id);
- if (h_ret != H_SUCCESS)
- ehca_err(ibqp->device,
- "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
- "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
- return ehca2ib_return_code(h_ret);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644
index 3367205e3160..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ /dev/null
@@ -1,2202 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * MR/MW functions
- *
- * Authors: Dietmar Decker <ddecker@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <rdma/ib_umem.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "hcp_if.h"
-#include "hipz_hw.h"
-
-#define NUM_CHUNKS(length, chunk_size) \
- (((length) + (chunk_size - 1)) / (chunk_size))
-
-/* max number of rpages (per hcall register_rpages) */
-#define MAX_RPAGES 512
-
-/* DMEM toleration management */
-#define EHCA_SECTSHIFT SECTION_SIZE_BITS
-#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
-#define EHCA_HUGEPAGESHIFT 34
-#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
-#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
-#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
-#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
-#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
-#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
-#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
-#define EHCA_DIR_MAP_SIZE (0x10000)
-#define EHCA_ENT_MAP_SIZE (0x10000)
-#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
-
-static unsigned long ehca_mr_len;
-
-/*
- * Memory map data structures
- */
-struct ehca_dir_bmap {
- u64 ent[EHCA_MAP_ENTRIES];
-};
-struct ehca_top_bmap {
- struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
-};
-struct ehca_bmap {
- struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
-};
-
-static struct ehca_bmap *ehca_bmap;
-
-static struct kmem_cache *mr_cache;
-static struct kmem_cache *mw_cache;
-
-enum ehca_mr_pgsize {
- EHCA_MR_PGSIZE4K = 0x1000L,
- EHCA_MR_PGSIZE64K = 0x10000L,
- EHCA_MR_PGSIZE1M = 0x100000L,
- EHCA_MR_PGSIZE16M = 0x1000000L
-};
-
-#define EHCA_MR_PGSHIFT4K 12
-#define EHCA_MR_PGSHIFT64K 16
-#define EHCA_MR_PGSHIFT1M 20
-#define EHCA_MR_PGSHIFT16M 24
-
-static u64 ehca_map_vaddr(void *caddr);
-
-static u32 ehca_encode_hwpage_size(u32 pgsize)
-{
- int log = ilog2(pgsize);
- WARN_ON(log < 12 || log > 24 || log & 3);
- return (log - 12) / 4;
-}
-
-static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
-{
- return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
-}
-
-static struct ehca_mr *ehca_mr_new(void)
-{
- struct ehca_mr *me;
-
- me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
- if (me)
- spin_lock_init(&me->mrlock);
- else
- ehca_gen_err("alloc failed");
-
- return me;
-}
-
-static void ehca_mr_delete(struct ehca_mr *me)
-{
- kmem_cache_free(mr_cache, me);
-}
-
-static struct ehca_mw *ehca_mw_new(void)
-{
- struct ehca_mw *me;
-
- me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
- if (me)
- spin_lock_init(&me->mwlock);
- else
- ehca_gen_err("alloc failed");
-
- return me;
-}
-
-static void ehca_mw_delete(struct ehca_mw *me)
-{
- kmem_cache_free(mw_cache, me);
-}
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
- struct ib_mr *ib_mr;
- int ret;
- struct ehca_mr *e_maxmr;
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
-
- if (shca->maxmr) {
- e_maxmr = ehca_mr_new();
- if (!e_maxmr) {
- ehca_err(&shca->ib_device, "out of memory");
- ib_mr = ERR_PTR(-ENOMEM);
- goto get_dma_mr_exit0;
- }
-
- ret = ehca_reg_maxmr(shca, e_maxmr,
- (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
- mr_access_flags, e_pd,
- &e_maxmr->ib.ib_mr.lkey,
- &e_maxmr->ib.ib_mr.rkey);
- if (ret) {
- ehca_mr_delete(e_maxmr);
- ib_mr = ERR_PTR(ret);
- goto get_dma_mr_exit0;
- }
- ib_mr = &e_maxmr->ib.ib_mr;
- } else {
-		ehca_err(&shca->ib_device, "no internal max-MR exists!");
- ib_mr = ERR_PTR(-EINVAL);
- goto get_dma_mr_exit0;
- }
-
-get_dma_mr_exit0:
- if (IS_ERR(ib_mr))
- ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
- PTR_ERR(ib_mr), pd, mr_access_flags);
- return ib_mr;
-} /* end ehca_get_dma_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct ib_mr *ib_mr;
- struct ehca_mr *e_mr;
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_mr_pginfo pginfo;
- int ret, page_shift;
- u32 num_kpages;
- u32 num_hwpages;
- u64 hwpage_size;
-
- if (!pd) {
- ehca_gen_err("bad pd=%p", pd);
- return ERR_PTR(-EFAULT);
- }
-
- if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit0;
- }
-
- if (length == 0 || virt + length < virt) {
- ehca_err(pd->device, "bad input values: length=%llx "
- "virt_base=%llx", length, virt);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit0;
- }
-
- e_mr = ehca_mr_new();
- if (!e_mr) {
- ehca_err(pd->device, "out of memory");
- ib_mr = ERR_PTR(-ENOMEM);
- goto reg_user_mr_exit0;
- }
-
- e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(e_mr->umem)) {
- ib_mr = (void *)e_mr->umem;
- goto reg_user_mr_exit1;
- }
-
- if (e_mr->umem->page_size != PAGE_SIZE) {
- ehca_err(pd->device, "page size not supported, "
- "e_mr->umem->page_size=%x", e_mr->umem->page_size);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit2;
- }
-
- /* determine number of MR pages */
- num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
- /* select proper hw_pgsize */
- page_shift = PAGE_SHIFT;
- if (e_mr->umem->hugetlb) {
- /* determine page_shift, clamp between 4K and 16M */
- page_shift = (fls64(length - 1) + 3) & ~3;
- page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
- EHCA_MR_PGSHIFT16M);
- }
- hwpage_size = 1UL << page_shift;
-
- /* now that we have the desired page size, shift until it's
- * supported, too. 4K is always supported, so this terminates.
- */
- while (!(hwpage_size & shca->hca_cap_mr_pgsize))
- hwpage_size >>= 4;
-
-reg_user_mr_fallback:
- num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
- /* register MR on HCA */
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_USER;
- pginfo.hwpage_size = hwpage_size;
- pginfo.num_kpages = num_kpages;
- pginfo.num_hwpages = num_hwpages;
- pginfo.u.usr.region = e_mr->umem;
- pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
- pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
- ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
- e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
- if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
- ehca_warn(pd->device, "failed to register mr "
- "with hwpage_size=%llx", hwpage_size);
- ehca_info(pd->device, "try to register mr with "
- "kpage_size=%lx", PAGE_SIZE);
- /*
- * this means kpages are not contiguous for a hw page
- * try kernel page size as fallback solution
- */
- hwpage_size = PAGE_SIZE;
- goto reg_user_mr_fallback;
- }
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_user_mr_exit2;
- }
-
- /* successful registration of all pages */
- return &e_mr->ib.ib_mr;
-
-reg_user_mr_exit2:
- ib_umem_release(e_mr->umem);
-reg_user_mr_exit1:
- ehca_mr_delete(e_mr);
-reg_user_mr_exit0:
- if (IS_ERR(ib_mr))
- ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
- PTR_ERR(ib_mr), pd, mr_access_flags, udata);
- return ib_mr;
-} /* end ehca_reg_user_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_mr(struct ib_mr *mr)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(mr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-
- if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
- "e_mr->flags=%x", mr, e_mr, e_mr->flags);
- ret = -EINVAL;
- goto dereg_mr_exit0;
- } else if (e_mr == shca->maxmr) {
- /* should be impossible, however reject to be sure */
- ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
- "shca->maxmr=%p mr->lkey=%x",
- mr, shca->maxmr, mr->lkey);
- ret = -EINVAL;
- goto dereg_mr_exit0;
- }
-
- /* TODO: BUSY: MR still has bound window(s) */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
- if (h_ret != H_SUCCESS) {
- ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
- "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
- h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle, mr->lkey);
- ret = ehca2ib_return_code(h_ret);
- goto dereg_mr_exit0;
- }
-
- if (e_mr->umem)
- ib_umem_release(e_mr->umem);
-
- /* successful deregistration */
- ehca_mr_delete(e_mr);
-
-dereg_mr_exit0:
- if (ret)
- ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
- return ret;
-} /* end ehca_dereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
- struct ib_mw *ib_mw;
- u64 h_ret;
- struct ehca_mw *e_mw;
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_mw_hipzout_parms hipzout;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- e_mw = ehca_mw_new();
- if (!e_mw) {
- ib_mw = ERR_PTR(-ENOMEM);
- goto alloc_mw_exit0;
- }
-
- h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
- e_pd->fw_pd, &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
- "shca=%p hca_hndl=%llx mw=%p",
- h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
- ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
- goto alloc_mw_exit1;
- }
- /* successful MW allocation */
- e_mw->ipz_mw_handle = hipzout.handle;
- e_mw->ib_mw.rkey = hipzout.rkey;
- return &e_mw->ib_mw;
-
-alloc_mw_exit1:
- ehca_mw_delete(e_mw);
-alloc_mw_exit0:
- if (IS_ERR(ib_mw))
- ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
- return ib_mw;
-} /* end ehca_alloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_mw(struct ib_mw *mw)
-{
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(mw->device, struct ehca_shca, ib_device);
- struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
-
- h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
- if (h_ret != H_SUCCESS) {
- ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
- "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
- h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
- e_mw->ipz_mw_handle.handle);
- return ehca2ib_return_code(h_ret);
- }
- /* successful deallocation */
- ehca_mw_delete(e_mw);
- return 0;
-} /* end ehca_dealloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct ib_fmr *ib_fmr;
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_mr *e_fmr;
- int ret;
- u32 tmp_lkey, tmp_rkey;
- struct ehca_mr_pginfo pginfo;
- u64 hw_pgsize;
-
- /* check other parameters */
- if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
- if (mr_access_flags & IB_ACCESS_MW_BIND) {
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
- if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
- ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
- "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
-
- hw_pgsize = 1 << fmr_attr->page_shift;
- if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
- ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
- fmr_attr->page_shift);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
-
- e_fmr = ehca_mr_new();
- if (!e_fmr) {
- ib_fmr = ERR_PTR(-ENOMEM);
- goto alloc_fmr_exit0;
- }
- e_fmr->flags |= EHCA_MR_FLAG_FMR;
-
- /* register MR on HCA */
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.hwpage_size = hw_pgsize;
- /*
- * pginfo.num_hwpages==0, ie register_rpages() will not be called
- * but deferred to map_phys_fmr()
- */
- ret = ehca_reg_mr(shca, e_fmr, NULL,
- fmr_attr->max_pages * (1 << fmr_attr->page_shift),
- mr_access_flags, e_pd, &pginfo,
- &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
- if (ret) {
- ib_fmr = ERR_PTR(ret);
- goto alloc_fmr_exit1;
- }
-
- /* successful */
- e_fmr->hwpage_size = hw_pgsize;
- e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
- e_fmr->fmr_max_pages = fmr_attr->max_pages;
- e_fmr->fmr_max_maps = fmr_attr->max_maps;
- e_fmr->fmr_map_cnt = 0;
- return &e_fmr->ib.ib_fmr;
-
-alloc_fmr_exit1:
- ehca_mr_delete(e_fmr);
-alloc_fmr_exit0:
- return ib_fmr;
-} /* end ehca_alloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list,
- int list_len,
- u64 iova)
-{
- int ret;
- struct ehca_shca *shca =
- container_of(fmr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
- struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
- struct ehca_mr_pginfo pginfo;
- u32 tmp_lkey, tmp_rkey;
-
- if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
- e_fmr, e_fmr->flags);
- ret = -EINVAL;
- goto map_phys_fmr_exit0;
- }
- ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
- if (ret)
- goto map_phys_fmr_exit0;
- if (iova % e_fmr->fmr_page_size) {
- /* only whole-numbered pages */
- ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
- iova, e_fmr->fmr_page_size);
- ret = -EINVAL;
- goto map_phys_fmr_exit0;
- }
- if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
- /* HCAD does not limit the maps, however trace this anyway */
- ehca_info(fmr->device, "map limit exceeded, fmr=%p "
- "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
- fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
- }
-
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_FMR;
- pginfo.num_kpages = list_len;
- pginfo.hwpage_size = e_fmr->hwpage_size;
- pginfo.num_hwpages =
- list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
- pginfo.u.fmr.page_list = page_list;
- pginfo.next_hwpage =
- (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
- pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
-
- ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
- list_len * e_fmr->fmr_page_size,
- e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
- if (ret)
- goto map_phys_fmr_exit0;
-
- /* successful reregistration */
- e_fmr->fmr_map_cnt++;
- e_fmr->ib.ib_fmr.lkey = tmp_lkey;
- e_fmr->ib.ib_fmr.rkey = tmp_rkey;
- return 0;
-
-map_phys_fmr_exit0:
- if (ret)
- ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
- "iova=%llx", ret, fmr, page_list, list_len, iova);
- return ret;
-} /* end ehca_map_phys_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_fmr(struct list_head *fmr_list)
-{
- int ret = 0;
- struct ib_fmr *ib_fmr;
- struct ehca_shca *shca = NULL;
- struct ehca_shca *prev_shca;
- struct ehca_mr *e_fmr;
- u32 num_fmr = 0;
- u32 unmap_fmr_cnt = 0;
-
-	/* check that all FMRs belong to the same SHCA, and check internal flag */
- list_for_each_entry(ib_fmr, fmr_list, list) {
- prev_shca = shca;
- shca = container_of(ib_fmr->device, struct ehca_shca,
- ib_device);
- e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
- if ((shca != prev_shca) && prev_shca) {
- ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
- "prev_shca=%p e_fmr=%p",
- shca, prev_shca, e_fmr);
- ret = -EINVAL;
- goto unmap_fmr_exit0;
- }
- if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
- "e_fmr->flags=%x", e_fmr, e_fmr->flags);
- ret = -EINVAL;
- goto unmap_fmr_exit0;
- }
- num_fmr++;
- }
-
- /* loop over all FMRs to unmap */
- list_for_each_entry(ib_fmr, fmr_list, list) {
- unmap_fmr_cnt++;
- e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
- shca = container_of(ib_fmr->device, struct ehca_shca,
- ib_device);
- ret = ehca_unmap_one_fmr(shca, e_fmr);
- if (ret) {
- /* unmap failed, stop unmapping of rest of FMRs */
- ehca_err(&shca->ib_device, "unmap of one FMR failed, "
- "stop rest, e_fmr=%p num_fmr=%x "
- "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
- unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
- goto unmap_fmr_exit0;
- }
- }
-
-unmap_fmr_exit0:
- if (ret)
- ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
- ret, fmr_list, num_fmr, unmap_fmr_cnt);
- return ret;
-} /* end ehca_unmap_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr)
-{
- int ret;
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(fmr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-
- if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
- e_fmr, e_fmr->flags);
- ret = -EINVAL;
- goto free_fmr_exit0;
- }
-
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
- if (h_ret != H_SUCCESS) {
- ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
- "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle, fmr->lkey);
- ret = ehca2ib_return_code(h_ret);
- goto free_fmr_exit0;
- }
- /* successful deregistration */
- ehca_mr_delete(e_fmr);
- return 0;
-
-free_fmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
- return ret;
-} /* end ehca_dealloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo);
-
-int ehca_reg_mr(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- int acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey, /*OUT*/
- u32 *rkey, /*OUT*/
- enum ehca_reg_type reg_type)
-{
- int ret;
- u64 h_ret;
- u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
- if (ehca_use_hp_mr == 1)
- hipz_acl |= 0x00000001;
-
- h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
- (u64)iova_start, size, hipz_acl,
- e_pd->fw_pd, &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
- "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_reg_mr_exit0;
- }
-
- e_mr->ipz_mr_handle = hipzout.handle;
-
- if (reg_type == EHCA_REG_BUSMAP_MR)
- ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
- else if (reg_type == EHCA_REG_MR)
- ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
- else
- ret = -EINVAL;
-
- if (ret)
- goto ehca_reg_mr_exit1;
-
- /* successful registration */
- e_mr->num_kpages = pginfo->num_kpages;
- e_mr->num_hwpages = pginfo->num_hwpages;
- e_mr->hwpage_size = pginfo->hwpage_size;
- e_mr->start = iova_start;
- e_mr->size = size;
- e_mr->acl = acl;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- return 0;
-
-ehca_reg_mr_exit1:
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
- "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
- "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
- h_ret, shca, e_mr, iova_start, size, acl, e_pd,
- hipzout.lkey, pginfo, pginfo->num_kpages,
- pginfo->num_hwpages, ret);
- ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
- "not recoverable");
- }
-ehca_reg_mr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
- "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
- "num_kpages=%llx num_hwpages=%llx",
- ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
- pginfo->num_kpages, pginfo->num_hwpages);
- return ret;
-} /* end ehca_reg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo)
-{
- int ret = 0;
- u64 h_ret;
- u32 rnum;
- u64 rpage;
- u32 i;
- u64 *kpage;
-
- if (!pginfo->num_hwpages) /* in case of fmr */
- return 0;
-
- kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!kpage) {
- ehca_err(&shca->ib_device, "kpage alloc failed");
- ret = -ENOMEM;
- goto ehca_reg_mr_rpages_exit0;
- }
-
-	/* at most MAX_RPAGES ehca MR pages per register call */
- for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
-
- if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
- rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
- if (rnum == 0)
- rnum = MAX_RPAGES; /* last shot is full */
- } else
- rnum = MAX_RPAGES;
-
- ret = ehca_set_pagebuf(pginfo, rnum, kpage);
- if (ret) {
- ehca_err(&shca->ib_device, "ehca_set_pagebuf "
- "bad rc, ret=%i rnum=%x kpage=%p",
- ret, rnum, kpage);
- goto ehca_reg_mr_rpages_exit1;
- }
-
- if (rnum > 1) {
- rpage = __pa(kpage);
- if (!rpage) {
- ehca_err(&shca->ib_device, "kpage=%p i=%x",
- kpage, i);
- ret = -EFAULT;
- goto ehca_reg_mr_rpages_exit1;
- }
- } else
- rpage = *kpage;
-
- h_ret = hipz_h_register_rpage_mr(
- shca->ipz_hca_handle, e_mr,
- ehca_encode_hwpage_size(pginfo->hwpage_size),
- 0, rpage, rnum);
-
- if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
- /*
- * check for 'registration complete'==H_SUCCESS
- * and for 'page registered'==H_PAGE_REGISTERED
- */
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "last "
- "hipz_reg_rpage_mr failed, h_ret=%lli "
- "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
- " lkey=%x", h_ret, e_mr, i,
- shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle,
- e_mr->ib.ib_mr.lkey);
- ret = ehca2ib_return_code(h_ret);
- break;
- } else
- ret = 0;
- } else if (h_ret != H_PAGE_REGISTERED) {
- ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
- "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
- "mr_hndl=%llx", h_ret, e_mr, i,
- e_mr->ib.ib_mr.lkey,
- shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle);
- ret = ehca2ib_return_code(h_ret);
- break;
- } else
- ret = 0;
- } /* end for(i) */
-
-
-ehca_reg_mr_rpages_exit1:
- ehca_free_fw_ctrlblock(kpage);
-ehca_reg_mr_rpages_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
- "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
- pginfo, pginfo->num_kpages, pginfo->num_hwpages);
- return ret;
-} /* end ehca_reg_mr_rpages() */
-
-/*----------------------------------------------------------------------*/
-
-inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- u32 acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey, /*OUT*/
- u32 *rkey) /*OUT*/
-{
- int ret;
- u64 h_ret;
- u32 hipz_acl;
- u64 *kpage;
- u64 rpage;
- struct ehca_mr_pginfo pginfo_save;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-
- kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!kpage) {
- ehca_err(&shca->ib_device, "kpage alloc failed");
- ret = -ENOMEM;
- goto ehca_rereg_mr_rereg1_exit0;
- }
-
- pginfo_save = *pginfo;
- ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
- if (ret) {
- ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
- "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
- "kpage=%p", e_mr, pginfo, pginfo->type,
- pginfo->num_kpages, pginfo->num_hwpages, kpage);
- goto ehca_rereg_mr_rereg1_exit1;
- }
- rpage = __pa(kpage);
- if (!rpage) {
- ehca_err(&shca->ib_device, "kpage=%p", kpage);
- ret = -EFAULT;
- goto ehca_rereg_mr_rereg1_exit1;
- }
- h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
- (u64)iova_start, size, hipz_acl,
- e_pd->fw_pd, rpage, &hipzout);
- if (h_ret != H_SUCCESS) {
- /*
- * reregistration unsuccessful, try it again with the 3 hCalls,
- * e.g. this is required in case H_MR_CONDITION
- * (MW bound or MR is shared)
- */
- ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
- "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
- *pginfo = pginfo_save;
- ret = -EAGAIN;
- } else if ((u64 *)hipzout.vaddr != iova_start) {
- ehca_err(&shca->ib_device, "PHYP changed iova_start in "
- "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
- "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
- hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
- e_mr->ib.ib_mr.lkey, hipzout.lkey);
- ret = -EFAULT;
- } else {
- /*
- * successful reregistration
- * note: start and start_out are identical for eServer HCAs
- */
- e_mr->num_kpages = pginfo->num_kpages;
- e_mr->num_hwpages = pginfo->num_hwpages;
- e_mr->hwpage_size = pginfo->hwpage_size;
- e_mr->start = iova_start;
- e_mr->size = size;
- e_mr->acl = acl;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- }
-
-ehca_rereg_mr_rereg1_exit1:
- ehca_free_fw_ctrlblock(kpage);
-ehca_rereg_mr_rereg1_exit0:
- if ( ret && (ret != -EAGAIN) )
- ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
- "pginfo=%p num_kpages=%llx num_hwpages=%llx",
- ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
- pginfo->num_hwpages);
- return ret;
-} /* end ehca_rereg_mr_rereg1() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_rereg_mr(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- int acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey,
- u32 *rkey)
-{
- int ret = 0;
- u64 h_ret;
- int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
- int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
-
- /* first determine reregistration hCall(s) */
- if ((pginfo->num_hwpages > MAX_RPAGES) ||
- (e_mr->num_hwpages > MAX_RPAGES) ||
- (pginfo->num_hwpages > e_mr->num_hwpages)) {
- ehca_dbg(&shca->ib_device, "Rereg3 case, "
- "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
- pginfo->num_hwpages, e_mr->num_hwpages);
- rereg_1_hcall = 0;
- rereg_3_hcall = 1;
- }
-
- if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
- rereg_1_hcall = 0;
- rereg_3_hcall = 1;
- e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
- ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
- e_mr);
- }
-
- if (rereg_1_hcall) {
- ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
- acl, e_pd, pginfo, lkey, rkey);
- if (ret) {
- if (ret == -EAGAIN)
- rereg_3_hcall = 1;
- else
- goto ehca_rereg_mr_exit0;
- }
- }
-
- if (rereg_3_hcall) {
- struct ehca_mr save_mr;
-
- /* first deregister old MR */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_free_mr failed, "
- "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
- "mr->lkey=%x",
- h_ret, e_mr, shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle,
- e_mr->ib.ib_mr.lkey);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_rereg_mr_exit0;
- }
- /* clean ehca_mr_t, without changing struct ib_mr and lock */
- save_mr = *e_mr;
- ehca_mr_deletenew(e_mr);
-
- /* set some MR values */
- e_mr->flags = save_mr.flags;
- e_mr->hwpage_size = save_mr.hwpage_size;
- e_mr->fmr_page_size = save_mr.fmr_page_size;
- e_mr->fmr_max_pages = save_mr.fmr_max_pages;
- e_mr->fmr_max_maps = save_mr.fmr_max_maps;
- e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
-
- ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
- e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
- if (ret) {
- u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
- memcpy(&e_mr->flags, &(save_mr.flags),
- sizeof(struct ehca_mr) - offset);
- goto ehca_rereg_mr_exit0;
- }
- }
-
-ehca_rereg_mr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
- "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
- "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
- "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
- acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
- rereg_1_hcall, rereg_3_hcall);
- return ret;
-} /* end ehca_rereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
- struct ehca_mr *e_fmr)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_pd *e_pd =
- container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
- struct ehca_mr save_fmr;
- u32 tmp_lkey, tmp_rkey;
- struct ehca_mr_pginfo pginfo;
- struct ehca_mr_hipzout_parms hipzout;
- struct ehca_mr save_mr;
-
- if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
- /*
- * note: after using rereg hcall with len=0,
- * rereg hcall must be used again for registering pages
- */
- h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
- 0, 0, e_pd->fw_pd, 0, &hipzout);
- if (h_ret == H_SUCCESS) {
- /* successful reregistration */
- e_fmr->start = NULL;
- e_fmr->size = 0;
- tmp_lkey = hipzout.lkey;
- tmp_rkey = hipzout.rkey;
- return 0;
- }
- /*
-		 * should not happen: the length was checked above,
-		 * FMRs are not shared, and no MW is bound to FMRs
- */
- ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
- "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
- "mr_hndl=%llx lkey=%x lkey_out=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle,
- e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
- /* try free and rereg */
- }
-
- /* first free old FMR */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_free_mr failed, "
- "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
- "lkey=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle,
- e_fmr->ib.ib_fmr.lkey);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_unmap_one_fmr_exit0;
- }
- /* clean ehca_mr_t, without changing lock */
- save_fmr = *e_fmr;
- ehca_mr_deletenew(e_fmr);
-
- /* set some MR values */
- e_fmr->flags = save_fmr.flags;
- e_fmr->hwpage_size = save_fmr.hwpage_size;
- e_fmr->fmr_page_size = save_fmr.fmr_page_size;
- e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
- e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
- e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
- e_fmr->acl = save_fmr.acl;
-
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_FMR;
- ret = ehca_reg_mr(shca, e_fmr, NULL,
- (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
- e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
- &tmp_rkey, EHCA_REG_MR);
- if (ret) {
- u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
- memcpy(&e_fmr->flags, &(save_mr.flags),
- sizeof(struct ehca_mr) - offset);
- }
-
-ehca_unmap_one_fmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
- "fmr_max_pages=%x",
- ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
- return ret;
-} /* end ehca_unmap_one_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_smr(struct ehca_shca *shca,
- struct ehca_mr *e_origmr,
- struct ehca_mr *e_newmr,
- u64 *iova_start,
- int acl,
- struct ehca_pd *e_pd,
- u32 *lkey, /*OUT*/
- u32 *rkey) /*OUT*/
-{
- int ret = 0;
- u64 h_ret;
- u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
- h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
- (u64)iova_start, hipz_acl, e_pd->fw_pd,
- &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
- "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
- "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
- h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
- shca->ipz_hca_handle.handle,
- e_origmr->ipz_mr_handle.handle,
- e_origmr->ib.ib_mr.lkey);
- ret = ehca2ib_return_code(h_ret);
- goto ehca_reg_smr_exit0;
- }
- /* successful registration */
- e_newmr->num_kpages = e_origmr->num_kpages;
- e_newmr->num_hwpages = e_origmr->num_hwpages;
- e_newmr->hwpage_size = e_origmr->hwpage_size;
- e_newmr->start = iova_start;
- e_newmr->size = e_origmr->size;
- e_newmr->acl = acl;
- e_newmr->ipz_mr_handle = hipzout.handle;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- return 0;
-
-ehca_reg_smr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
- "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
- ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
- return ret;
-} /* end ehca_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-static inline void *ehca_calc_sectbase(int top, int dir, int idx)
-{
- unsigned long ret = idx;
- ret |= dir << EHCA_DIR_INDEX_SHIFT;
- ret |= top << EHCA_TOP_INDEX_SHIFT;
- return __va(ret << SECTION_SIZE_BITS);
-}
-
-#define ehca_bmap_valid(entry) \
- ((u64)entry != (u64)EHCA_INVAL_ADDR)
-
-static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
- struct ehca_shca *shca, struct ehca_mr *mr,
- struct ehca_mr_pginfo *pginfo)
-{
- u64 h_ret = 0;
- unsigned long page = 0;
- u64 rpage = __pa(kpage);
- int page_count;
-
- void *sectbase = ehca_calc_sectbase(top, dir, idx);
- if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
- ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
- "hwpage_size does not fit to "
- "section start address");
- }
- page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
-
- while (page < page_count) {
- u64 rnum;
- for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
- rnum++) {
- void *pg = sectbase + ((page++) * pginfo->hwpage_size);
- kpage[rnum] = __pa(pg);
- }
-
- h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
- ehca_encode_hwpage_size(pginfo->hwpage_size),
- 0, rpage, rnum);
-
- if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
- ehca_err(&shca->ib_device, "register_rpage_mr failed");
- return h_ret;
- }
- }
- return h_ret;
-}
-
-static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
- struct ehca_shca *shca, struct ehca_mr *mr,
- struct ehca_mr_pginfo *pginfo)
-{
- u64 hret = H_SUCCESS;
- int idx;
-
- for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
- continue;
-
- hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
- pginfo);
- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
- return hret;
- }
- return hret;
-}
-
-static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
- struct ehca_mr *mr,
- struct ehca_mr_pginfo *pginfo)
-{
- u64 hret = H_SUCCESS;
- int dir;
-
- for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
- continue;
-
- hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
- return hret;
- }
- return hret;
-}
-
-/* register internal max-MR to internal SHCA */
-int ehca_reg_internal_maxmr(
- struct ehca_shca *shca,
- struct ehca_pd *e_pd,
- struct ehca_mr **e_maxmr) /*OUT*/
-{
- int ret;
- struct ehca_mr *e_mr;
- u64 *iova_start;
- u64 size_maxmr;
- struct ehca_mr_pginfo pginfo;
- u32 num_kpages;
- u32 num_hwpages;
- u64 hw_pgsize;
-
- if (!ehca_bmap) {
- ret = -EFAULT;
- goto ehca_reg_internal_maxmr_exit0;
- }
-
- e_mr = ehca_mr_new();
- if (!e_mr) {
- ehca_err(&shca->ib_device, "out of memory");
- ret = -ENOMEM;
- goto ehca_reg_internal_maxmr_exit0;
- }
- e_mr->flags |= EHCA_MR_FLAG_MAXMR;
-
- /* register internal max-MR on HCA */
- size_maxmr = ehca_mr_len;
- iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
- num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
- PAGE_SIZE);
- hw_pgsize = ehca_get_max_hwpage_size(shca);
- num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
- hw_pgsize);
-
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_kpages = num_kpages;
- pginfo.num_hwpages = num_hwpages;
- pginfo.hwpage_size = hw_pgsize;
- pginfo.u.phy.addr = 0;
- pginfo.u.phy.size = size_maxmr;
-
- ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
- &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
- if (ret) {
- ehca_err(&shca->ib_device, "reg of internal max MR failed, "
- "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
- "num_hwpages=%x", e_mr, iova_start, size_maxmr,
- num_kpages, num_hwpages);
- goto ehca_reg_internal_maxmr_exit1;
- }
-
- /* successful registration of all pages */
- e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
- e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
- e_mr->ib.ib_mr.uobject = NULL;
- atomic_inc(&(e_pd->ib_pd.usecnt));
- *e_maxmr = e_mr;
- return 0;
-
-ehca_reg_internal_maxmr_exit1:
- ehca_mr_delete(e_mr);
-ehca_reg_internal_maxmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
- ret, shca, e_pd, e_maxmr);
- return ret;
-} /* end ehca_reg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
- struct ehca_mr *e_newmr,
- u64 *iova_start,
- int acl,
- struct ehca_pd *e_pd,
- u32 *lkey,
- u32 *rkey)
-{
- u64 h_ret;
- struct ehca_mr *e_origmr = shca->maxmr;
- u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout;
-
- ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
- h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
- (u64)iova_start, hipz_acl, e_pd->fw_pd,
- &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
- "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
- h_ret, e_origmr, shca->ipz_hca_handle.handle,
- e_origmr->ipz_mr_handle.handle,
- e_origmr->ib.ib_mr.lkey);
- return ehca2ib_return_code(h_ret);
- }
- /* successful registration */
- e_newmr->num_kpages = e_origmr->num_kpages;
- e_newmr->num_hwpages = e_origmr->num_hwpages;
- e_newmr->hwpage_size = e_origmr->hwpage_size;
- e_newmr->start = iova_start;
- e_newmr->size = e_origmr->size;
- e_newmr->acl = acl;
- e_newmr->ipz_mr_handle = hipzout.handle;
- *lkey = hipzout.lkey;
- *rkey = hipzout.rkey;
- return 0;
-} /* end ehca_reg_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
-{
- int ret;
- struct ehca_mr *e_maxmr;
- struct ib_pd *ib_pd;
-
- if (!shca->maxmr) {
- ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
- ret = -EINVAL;
- goto ehca_dereg_internal_maxmr_exit0;
- }
-
- e_maxmr = shca->maxmr;
- ib_pd = e_maxmr->ib.ib_mr.pd;
- shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
-
- ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
- if (ret) {
- ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
- "ret=%i e_maxmr=%p shca=%p lkey=%x",
- ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
- shca->maxmr = e_maxmr;
- goto ehca_dereg_internal_maxmr_exit0;
- }
-
- atomic_dec(&ib_pd->usecnt);
-
-ehca_dereg_internal_maxmr_exit0:
- if (ret)
- ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
- ret, shca, shca->maxmr);
- return ret;
-} /* end ehca_dereg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* check the page list passed to the map FMR verb for validity */
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
- u64 *page_list,
- int list_len)
-{
- u32 i;
- u64 *page;
-
- if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
- ehca_gen_err("bad list_len, list_len=%x "
- "e_fmr->fmr_max_pages=%x fmr=%p",
- list_len, e_fmr->fmr_max_pages, e_fmr);
- return -EINVAL;
- }
-
- /* each page must be aligned */
- page = page_list;
- for (i = 0; i < list_len; i++) {
- if (*page % e_fmr->fmr_page_size) {
- ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
- "fmr_page_size=%x", i, *page, page, e_fmr,
- e_fmr->fmr_page_size);
- return -EINVAL;
- }
- page++;
- }
-
- return 0;
-} /* end ehca_fmr_check_page_list() */
-
-/*----------------------------------------------------------------------*/
-
-/* PAGE_SIZE >= pginfo->hwpage_size */
-static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage)
-{
- int ret = 0;
- u64 pgaddr;
- u32 j = 0;
- int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
- struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
- while (*sg != NULL) {
- pgaddr = page_to_pfn(sg_page(*sg))
- << PAGE_SHIFT;
- *kpage = pgaddr + (pginfo->next_hwpage *
- pginfo->hwpage_size);
- if (!(*kpage)) {
- ehca_gen_err("pgaddr=%llx "
- "sg_dma_address=%llx "
- "entry=%llx next_hwpage=%llx",
- pgaddr, (u64)sg_dma_address(*sg),
- pginfo->u.usr.next_nmap,
- pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- (pginfo->next_hwpage)++;
- kpage++;
- if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
- (pginfo->kpage_cnt)++;
- (pginfo->u.usr.next_nmap)++;
- pginfo->next_hwpage = 0;
- *sg = sg_next(*sg);
- }
- j++;
- if (j >= number)
- break;
- }
-
- return ret;
-}
-
-/*
- * check the given pages for a contiguous layout;
- * the last page address is returned in prev_pgaddr for further checks
- */
-static int ehca_check_kpages_per_ate(struct scatterlist **sg,
- int num_pages,
- u64 *prev_pgaddr)
-{
- for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
- u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
- if (ehca_debug_level >= 3)
- ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
- *(u64 *)__va(pgaddr));
- if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
- ehca_gen_err("uncontiguous page found pgaddr=%llx "
- "prev_pgaddr=%llx entries_left_in_hwpage=%x",
- pgaddr, *prev_pgaddr, num_pages);
- return -EINVAL;
- }
- *prev_pgaddr = pgaddr;
- }
- return 0;
-}
-
-/* PAGE_SIZE < pginfo->hwpage_size */
-static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage)
-{
- int ret = 0;
- u64 pgaddr, prev_pgaddr;
- u32 j = 0;
- int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
- int nr_kpages = kpages_per_hwpage;
- struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
- while (*sg != NULL) {
-
- if (nr_kpages == kpages_per_hwpage) {
- pgaddr = (page_to_pfn(sg_page(*sg))
- << PAGE_SHIFT);
- *kpage = pgaddr;
- if (!(*kpage)) {
- ehca_gen_err("pgaddr=%llx entry=%llx",
- pgaddr, pginfo->u.usr.next_nmap);
- ret = -EFAULT;
- return ret;
- }
- /*
- * The first page in a hwpage must be aligned;
- * the first MR page is exempt from this rule.
- */
- if (pgaddr & (pginfo->hwpage_size - 1)) {
- if (pginfo->hwpage_cnt) {
- ehca_gen_err(
- "invalid alignment "
- "pgaddr=%llx entry=%llx "
- "mr_pgsize=%llx",
- pgaddr, pginfo->u.usr.next_nmap,
- pginfo->hwpage_size);
- ret = -EFAULT;
- return ret;
- }
- /* first MR page */
- pginfo->kpage_cnt =
- (pgaddr &
- (pginfo->hwpage_size - 1)) >>
- PAGE_SHIFT;
- nr_kpages -= pginfo->kpage_cnt;
- *kpage = pgaddr &
- ~(pginfo->hwpage_size - 1);
- }
- if (ehca_debug_level >= 3) {
- u64 val = *(u64 *)__va(pgaddr);
- ehca_gen_dbg("kpage=%llx page=%llx "
- "value=%016llx",
- *kpage, pgaddr, val);
- }
- prev_pgaddr = pgaddr;
- *sg = sg_next(*sg);
- pginfo->kpage_cnt++;
- pginfo->u.usr.next_nmap++;
- nr_kpages--;
- if (!nr_kpages)
- goto next_kpage;
- continue;
- }
-
- ret = ehca_check_kpages_per_ate(sg, nr_kpages,
- &prev_pgaddr);
- if (ret)
- return ret;
- pginfo->kpage_cnt += nr_kpages;
- pginfo->u.usr.next_nmap += nr_kpages;
-
-next_kpage:
- nr_kpages = kpages_per_hwpage;
- (pginfo->hwpage_cnt)++;
- kpage++;
- j++;
- if (j >= number)
- break;
- }
-
- return ret;
-}
-
-static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
- u32 number, u64 *kpage)
-{
- int ret = 0;
- u64 addr = pginfo->u.phy.addr;
- u64 size = pginfo->u.phy.size;
- u64 num_hw, offs_hw;
- u32 i = 0;
-
- num_hw = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
- pginfo->hwpage_size);
- offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
-
- while (pginfo->next_hwpage < offs_hw + num_hw) {
- /* sanity check */
- if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
- (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
- ehca_gen_err("kpage_cnt >= num_kpages, "
- "kpage_cnt=%llx num_kpages=%llx "
- "hwpage_cnt=%llx "
- "num_hwpages=%llx i=%x",
- pginfo->kpage_cnt,
- pginfo->num_kpages,
- pginfo->hwpage_cnt,
- pginfo->num_hwpages, i);
- return -EFAULT;
- }
- *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
- (pginfo->next_hwpage * pginfo->hwpage_size);
- if ( !(*kpage) && addr ) {
- ehca_gen_err("addr=%llx size=%llx "
- "next_hwpage=%llx", addr,
- size, pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- (pginfo->next_hwpage)++;
- if (PAGE_SIZE >= pginfo->hwpage_size) {
- if (pginfo->next_hwpage %
- (PAGE_SIZE / pginfo->hwpage_size) == 0)
- (pginfo->kpage_cnt)++;
- } else
- pginfo->kpage_cnt += pginfo->hwpage_size /
- PAGE_SIZE;
- kpage++;
- i++;
- if (i >= number) break;
- }
- if (pginfo->next_hwpage >= offs_hw + num_hw) {
- pginfo->next_hwpage = 0;
- }
-
- return ret;
-}
-
-static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
- u32 number, u64 *kpage)
-{
- int ret = 0;
- u64 *fmrlist;
- u32 i;
-
- /* loop over desired page_list entries */
- fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
- for (i = 0; i < number; i++) {
- *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
- pginfo->next_hwpage * pginfo->hwpage_size;
- if ( !(*kpage) ) {
- ehca_gen_err("*fmrlist=%llx fmrlist=%p "
- "next_listelem=%llx next_hwpage=%llx",
- *fmrlist, fmrlist,
- pginfo->u.fmr.next_listelem,
- pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
- if (pginfo->next_hwpage %
- (pginfo->u.fmr.fmr_pgsize /
- pginfo->hwpage_size) == 0) {
- (pginfo->kpage_cnt)++;
- (pginfo->u.fmr.next_listelem)++;
- fmrlist++;
- pginfo->next_hwpage = 0;
- } else
- (pginfo->next_hwpage)++;
- } else {
- unsigned int cnt_per_hwpage = pginfo->hwpage_size /
- pginfo->u.fmr.fmr_pgsize;
- unsigned int j;
- u64 prev = *kpage;
-			/* check if addresses are contiguous */
- for (j = 1; j < cnt_per_hwpage; j++) {
- u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
- if (prev + pginfo->u.fmr.fmr_pgsize != p) {
- ehca_gen_err("uncontiguous fmr pages "
- "found prev=%llx p=%llx "
- "idx=%x", prev, p, i + j);
- return -EINVAL;
- }
- prev = p;
- }
- pginfo->kpage_cnt += cnt_per_hwpage;
- pginfo->u.fmr.next_listelem += cnt_per_hwpage;
- fmrlist += cnt_per_hwpage;
- }
- kpage++;
- }
- return ret;
-}
-
-/* setup page buffer from page info */
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage)
-{
- int ret;
-
- switch (pginfo->type) {
- case EHCA_MR_PGI_PHYS:
- ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
- break;
- case EHCA_MR_PGI_USER:
- ret = PAGE_SIZE >= pginfo->hwpage_size ?
- ehca_set_pagebuf_user1(pginfo, number, kpage) :
- ehca_set_pagebuf_user2(pginfo, number, kpage);
- break;
- case EHCA_MR_PGI_FMR:
- ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
- break;
- default:
- ehca_gen_err("bad pginfo->type=%x", pginfo->type);
- ret = -EFAULT;
- break;
- }
- return ret;
-} /* end ehca_set_pagebuf() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * check whether the MR is a max-MR, i.e. covers the whole memory;
- * returns 1 if it is a max-MR, else 0
- */
-int ehca_mr_is_maxmr(u64 size,
- u64 *iova_start)
-{
-	/* an MR is treated as a max-MR only if it satisfies the following: */
- if ((size == ehca_mr_len) &&
- (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
- ehca_gen_dbg("this is a max-MR");
- return 1;
- } else
- return 0;
-} /* end ehca_mr_is_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* map access control for MR/MW. This routine is used for MR and MW. */
-void ehca_mrmw_map_acl(int ib_acl,
- u32 *hipz_acl)
-{
- *hipz_acl = 0;
- if (ib_acl & IB_ACCESS_REMOTE_READ)
- *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
- if (ib_acl & IB_ACCESS_REMOTE_WRITE)
- *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
- if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
- *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
- if (ib_acl & IB_ACCESS_LOCAL_WRITE)
- *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
- if (ib_acl & IB_ACCESS_MW_BIND)
- *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
-} /* end ehca_mrmw_map_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/* sets page size in hipz access control for MR/MW. */
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
-{
- *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
-} /* end ehca_mrmw_set_pgsize_hipz_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * reverse map access control for MR/MW.
- * This routine is used for MR and MW.
- */
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
- int *ib_acl) /*OUT*/
-{
- *ib_acl = 0;
- if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
- *ib_acl |= IB_ACCESS_REMOTE_READ;
- if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
- *ib_acl |= IB_ACCESS_REMOTE_WRITE;
- if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
- *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
- if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
- *ib_acl |= IB_ACCESS_LOCAL_WRITE;
- if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
- *ib_acl |= IB_ACCESS_MW_BIND;
-} /* end ehca_mrmw_reverse_map_acl() */
-
-
-/*----------------------------------------------------------------------*/
-
-/*
- * MR destructor and constructor
- * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
- * except struct ib_mr and spinlock
- */
-void ehca_mr_deletenew(struct ehca_mr *mr)
-{
- mr->flags = 0;
- mr->num_kpages = 0;
- mr->num_hwpages = 0;
- mr->acl = 0;
- mr->start = NULL;
- mr->fmr_page_size = 0;
- mr->fmr_max_pages = 0;
- mr->fmr_max_maps = 0;
- mr->fmr_map_cnt = 0;
- memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
- memset(&mr->galpas, 0, sizeof(mr->galpas));
-} /* end ehca_mr_deletenew() */
-
-int ehca_init_mrmw_cache(void)
-{
- mr_cache = kmem_cache_create("ehca_cache_mr",
- sizeof(struct ehca_mr), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!mr_cache)
- return -ENOMEM;
- mw_cache = kmem_cache_create("ehca_cache_mw",
- sizeof(struct ehca_mw), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!mw_cache) {
- kmem_cache_destroy(mr_cache);
- mr_cache = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-void ehca_cleanup_mrmw_cache(void)
-{
- kmem_cache_destroy(mr_cache);
- kmem_cache_destroy(mw_cache);
-}
-
-static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
- int dir)
-{
- if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
- ehca_top_bmap->dir[dir] =
- kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
- if (!ehca_top_bmap->dir[dir])
- return -ENOMEM;
- /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
- memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
- }
- return 0;
-}
-
-static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
-{
- if (!ehca_bmap_valid(ehca_bmap->top[top])) {
- ehca_bmap->top[top] =
- kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
- if (!ehca_bmap->top[top])
- return -ENOMEM;
- /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
- memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
- }
- return ehca_init_top_bmap(ehca_bmap->top[top], dir);
-}
-
-static inline int ehca_calc_index(unsigned long i, unsigned long s)
-{
- return (i >> s) & EHCA_INDEX_MASK;
-}
-
-void ehca_destroy_busmap(void)
-{
- int top, dir;
-
- if (!ehca_bmap)
- return;
-
- for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]))
- continue;
- for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
- continue;
-
- kfree(ehca_bmap->top[top]->dir[dir]);
- }
-
- kfree(ehca_bmap->top[top]);
- }
-
- kfree(ehca_bmap);
- ehca_bmap = NULL;
-}
-
-static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
-{
- unsigned long i, start_section, end_section;
- int top, dir, idx;
-
- if (!nr_pages)
- return 0;
-
- if (!ehca_bmap) {
- ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
- if (!ehca_bmap)
- return -ENOMEM;
- /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
- memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
- }
-
- start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
- end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
- for (i = start_section; i < end_section; i++) {
- int ret;
- top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
- dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
- idx = i & EHCA_INDEX_MASK;
-
- ret = ehca_init_bmap(ehca_bmap, top, dir);
- if (ret) {
- ehca_destroy_busmap();
- return ret;
- }
- ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
- ehca_mr_len += EHCA_SECTSIZE;
- }
- return 0;
-}
-
-static int ehca_is_hugepage(unsigned long pfn)
-{
- int page_order;
-
- if (pfn & EHCA_HUGEPAGE_PFN_MASK)
- return 0;
-
- page_order = compound_order(pfn_to_page(pfn));
- if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
- return 0;
-
- return 1;
-}
-
-static int ehca_create_busmap_callback(unsigned long initial_pfn,
- unsigned long total_nr_pages, void *arg)
-{
- int ret;
- unsigned long pfn, start_pfn, end_pfn, nr_pages;
-
- if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
- return ehca_update_busmap(initial_pfn, total_nr_pages);
-
- /* Given chunk is >= 16GB -> check for hugepages */
- start_pfn = initial_pfn;
- end_pfn = initial_pfn + total_nr_pages;
- pfn = start_pfn;
-
- while (pfn < end_pfn) {
- if (ehca_is_hugepage(pfn)) {
- /* Add mem found in front of the hugepage */
- nr_pages = pfn - start_pfn;
- ret = ehca_update_busmap(start_pfn, nr_pages);
- if (ret)
- return ret;
- /* Skip the hugepage */
- pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
- start_pfn = pfn;
- } else
- pfn += (EHCA_SECTSIZE / PAGE_SIZE);
- }
-
- /* Add mem found behind the hugepage(s) */
- nr_pages = pfn - start_pfn;
- return ehca_update_busmap(start_pfn, nr_pages);
-}
-
-int ehca_create_busmap(void)
-{
- int ret;
-
- ehca_mr_len = 0;
- ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
- ehca_create_busmap_callback);
- return ret;
-}
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo)
-{
- int top;
- u64 hret, *kpage;
-
- kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!kpage) {
- ehca_err(&shca->ib_device, "kpage alloc failed");
- return -ENOMEM;
- }
- for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
- if (!ehca_bmap_valid(ehca_bmap->top[top]))
- continue;
- hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
- if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
- break;
- }
-
- ehca_free_fw_ctrlblock(kpage);
-
- if (hret == H_SUCCESS)
- return 0; /* Everything is fine */
- else {
- ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
- "h_ret=%lli e_mr=%p top=%x lkey=%x "
- "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
- e_mr->ib.ib_mr.lkey,
- shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle);
- return ehca2ib_return_code(hret);
- }
-}
-
-static u64 ehca_map_vaddr(void *caddr)
-{
- int top, dir, idx;
- unsigned long abs_addr, offset;
- u64 entry;
-
- if (!ehca_bmap)
- return EHCA_INVAL_ADDR;
-
- abs_addr = __pa(caddr);
- top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
- if (!ehca_bmap_valid(ehca_bmap->top[top]))
- return EHCA_INVAL_ADDR;
-
- dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
- return EHCA_INVAL_ADDR;
-
- idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
-
- entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
- if (ehca_bmap_valid(entry)) {
- offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
- return entry | offset;
- } else
- return EHCA_INVAL_ADDR;
-}
-
-static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return dma_addr == EHCA_INVAL_ADDR;
-}
-
-static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction)
-{
- if (cpu_addr)
- return ehca_map_vaddr(cpu_addr);
- else
- return EHCA_INVAL_ADDR;
-}
-
-static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- /* This is only a stub; nothing to be done here */
-}
-
-static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- u64 addr;
-
- if (offset + size > PAGE_SIZE)
- return EHCA_INVAL_ADDR;
-
- addr = ehca_map_vaddr(page_address(page));
- if (!ehca_dma_mapping_error(dev, addr))
- addr += offset;
-
- return addr;
-}
-
-static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- /* This is only a stub; nothing to be done here */
-}
-
-static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nents, i) {
- u64 addr;
- addr = ehca_map_vaddr(sg_virt(sg));
- if (ehca_dma_mapping_error(dev, addr))
- return 0;
-
- sg->dma_address = addr;
- sg->dma_length = sg->length;
- }
- return nents;
-}
-
-static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- /* This is only a stub; nothing to be done here */
-}
-
-static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
- size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
-}
-
-static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
- size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
- u64 *dma_handle, gfp_t flag)
-{
- struct page *p;
- void *addr = NULL;
- u64 dma_addr;
-
- p = alloc_pages(flag, get_order(size));
- if (p) {
- addr = page_address(p);
- dma_addr = ehca_map_vaddr(addr);
- if (ehca_dma_mapping_error(dev, dma_addr)) {
- free_pages((unsigned long)addr, get_order(size));
- return NULL;
- }
- if (dma_handle)
- *dma_handle = dma_addr;
- return addr;
- }
- return NULL;
-}
-
-static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
- void *cpu_addr, u64 dma_handle)
-{
- if (cpu_addr && size)
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-
-struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
- .mapping_error = ehca_dma_mapping_error,
- .map_single = ehca_dma_map_single,
- .unmap_single = ehca_dma_unmap_single,
- .map_page = ehca_dma_map_page,
- .unmap_page = ehca_dma_unmap_page,
- .map_sg = ehca_dma_map_sg,
- .unmap_sg = ehca_dma_unmap_sg,
- .sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
- .sync_single_for_device = ehca_dma_sync_single_for_device,
- .alloc_coherent = ehca_dma_alloc_coherent,
- .free_coherent = ehca_dma_free_coherent,
-};
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
deleted file mode 100644
index 52bfa95697f7..000000000000
--- a/drivers/staging/rdma/ehca/ehca_mrmw.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * MR/MW declarations and inline functions
- *
- * Authors: Dietmar Decker <ddecker@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _EHCA_MRMW_H_
-#define _EHCA_MRMW_H_
-
-enum ehca_reg_type {
- EHCA_REG_MR,
- EHCA_REG_BUSMAP_MR
-};
-
-int ehca_reg_mr(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- int acl,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey,
- u32 *rkey,
- enum ehca_reg_type reg_type);
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo);
-
-int ehca_rereg_mr(struct ehca_shca *shca,
- struct ehca_mr *e_mr,
- u64 *iova_start,
- u64 size,
- int mr_access_flags,
- struct ehca_pd *e_pd,
- struct ehca_mr_pginfo *pginfo,
- u32 *lkey,
- u32 *rkey);
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
- struct ehca_mr *e_fmr);
-
-int ehca_reg_smr(struct ehca_shca *shca,
- struct ehca_mr *e_origmr,
- struct ehca_mr *e_newmr,
- u64 *iova_start,
- int acl,
- struct ehca_pd *e_pd,
- u32 *lkey,
- u32 *rkey);
-
-int ehca_reg_internal_maxmr(struct ehca_shca *shca,
- struct ehca_pd *e_pd,
- struct ehca_mr **maxmr);
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
- struct ehca_mr *e_newmr,
- u64 *iova_start,
- int acl,
- struct ehca_pd *e_pd,
- u32 *lkey,
- u32 *rkey);
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
-
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
- u64 *page_list,
- int list_len);
-
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage);
-
-int ehca_mr_is_maxmr(u64 size,
- u64 *iova_start);
-
-void ehca_mrmw_map_acl(int ib_acl,
- u32 *hipz_acl);
-
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
-
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
- int *ib_acl);
-
-void ehca_mr_deletenew(struct ehca_mr *mr);
-
-int ehca_create_busmap(void);
-
-void ehca_destroy_busmap(void);
-
-extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
-#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
deleted file mode 100644
index 2a8aae411941..000000000000
--- a/drivers/staging/rdma/ehca/ehca_pd.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * PD functions
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-
-static struct kmem_cache *pd_cache;
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
- struct ib_ucontext *context, struct ib_udata *udata)
-{
- struct ehca_pd *pd;
- int i;
-
- pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
- if (!pd) {
- ehca_err(device, "device=%p context=%p out of memory",
- device, context);
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0; i < 2; i++) {
- INIT_LIST_HEAD(&pd->free[i]);
- INIT_LIST_HEAD(&pd->full[i]);
- }
- mutex_init(&pd->lock);
-
- /*
- * Kernel PD: when device = -1, 0
- * User PD: when context != -1
- */
- if (!context) {
- /*
-		 * after init, kernel PDs always reuse
-		 * the one created in ehca_shca_reopen()
- */
- struct ehca_shca *shca = container_of(device, struct ehca_shca,
- ib_device);
- pd->fw_pd.value = shca->pd->fw_pd.value;
- } else
- pd->fw_pd.value = (u64)pd;
-
- return &pd->ib_pd;
-}
-
-int ehca_dealloc_pd(struct ib_pd *pd)
-{
- struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
- int i, leftovers = 0;
- struct ipz_small_queue_page *page, *tmp;
-
- for (i = 0; i < 2; i++) {
- list_splice(&my_pd->full[i], &my_pd->free[i]);
- list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
- leftovers = 1;
- free_page(page->page);
- kmem_cache_free(small_qp_cache, page);
- }
- }
-
- if (leftovers)
- ehca_warn(pd->device,
- "Some small queue pages were not freed");
-
- kmem_cache_free(pd_cache, my_pd);
-
- return 0;
-}
-
-int ehca_init_pd_cache(void)
-{
- pd_cache = kmem_cache_create("ehca_cache_pd",
- sizeof(struct ehca_pd), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!pd_cache)
- return -ENOMEM;
- return 0;
-}
-
-void ehca_cleanup_pd_cache(void)
-{
- kmem_cache_destroy(pd_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
deleted file mode 100644
index 90c4efa67586..000000000000
--- a/drivers/staging/rdma/ehca/ehca_qes.h
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Hardware request structures
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _EHCA_QES_H_
-#define _EHCA_QES_H_
-
-#include "ehca_tools.h"
-
-/* virtual scatter gather entry to specify remote addresses with length */
-struct ehca_vsgentry {
- u64 vaddr;
- u32 lkey;
- u32 length;
-};
-
-#define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7)
-#define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3)
-#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12)
-#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31)
-#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47)
-#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55)
-#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63)
-
-/*
- * Unreliable Datagram Address Vector Format
- * see IBTA Vol1 chapter 8.3 Global Routing Header
- */
-struct ehca_ud_av {
- u8 sl;
- u8 lnh;
- u16 dlid;
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- u8 slid_path_bits;
- u8 reserved4;
- u8 ipd;
- u8 reserved5;
- u8 pmtu;
- u32 reserved6;
- u64 reserved7;
- union {
- struct {
- u64 word_0; /* always set to 6 */
- /*should be 0x1B for IB transport */
- u64 word_1;
- u64 word_2;
- u64 word_3;
- u64 word_4;
- } grh;
- struct {
- u32 wd_0;
- u32 wd_1;
- /* DWord_1 --> SGID */
-
- u32 sgid_wd3;
- u32 sgid_wd2;
-
- u32 sgid_wd1;
- u32 sgid_wd0;
- /* DWord_3 --> DGID */
-
- u32 dgid_wd3;
- u32 dgid_wd2;
-
- u32 dgid_wd1;
- u32 dgid_wd0;
- } grh_l;
- };
-};
-
-/* maximum number of sg entries allowed in a WQE */
-#define MAX_WQE_SG_ENTRIES 252
-
-#define WQE_OPTYPE_SEND 0x80
-#define WQE_OPTYPE_RDMAREAD 0x40
-#define WQE_OPTYPE_RDMAWRITE 0x20
-#define WQE_OPTYPE_CMPSWAP 0x10
-#define WQE_OPTYPE_FETCHADD 0x08
-#define WQE_OPTYPE_BIND 0x04
-
-#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
-#define WQE_WRFLAG_FENCE 0x40
-#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
-#define WQE_WRFLAG_SOLIC_EVENT 0x10
-
-#define WQEF_CACHE_HINT 0x80
-#define WQEF_CACHE_HINT_RD_WR 0x40
-#define WQEF_TIMED_WQE 0x20
-#define WQEF_PURGE 0x08
-#define WQEF_HIGH_NIBBLE 0xF0
-
-#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
-#define MW_BIND_ACCESSCTRL_R_READ 0x20
-#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
-
-struct ehca_wqe {
- u64 work_request_id;
- u8 optype;
- u8 wr_flag;
- u16 pkeyi;
- u8 wqef;
- u8 nr_of_data_seg;
- u16 wqe_provided_slid;
- u32 destination_qp_number;
- u32 resync_psn_sqp;
- u32 local_ee_context_qkey;
- u32 immediate_data;
- union {
- struct {
- u64 remote_virtual_address;
- u32 rkey;
- u32 reserved;
- u64 atomic_1st_op_dma_len;
- u64 atomic_2nd_op;
- struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-
- } nud;
- struct {
- u64 ehca_ud_av_ptr;
- u64 reserved1;
- u64 reserved2;
- u64 reserved3;
- struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
- } ud_avp;
- struct {
- struct ehca_ud_av ud_av;
- struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
- 2];
- } ud_av;
- struct {
- u64 reserved0;
- u64 reserved1;
- u64 reserved2;
- u64 reserved3;
- struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
- } all_rcv;
-
- struct {
- u64 reserved;
- u32 rkey;
- u32 old_rkey;
- u64 reserved1;
- u64 reserved2;
- u64 virtual_address;
- u32 reserved3;
- u32 length;
- u32 reserved4;
- u16 reserved5;
- u8 reserved6;
- u8 lr_ctl;
- u32 lkey;
- u32 reserved7;
- u64 reserved8;
- u64 reserved9;
- u64 reserved10;
- u64 reserved11;
- } bind;
- struct {
- u64 reserved12;
- u64 reserved13;
- u32 size;
- u32 start;
- } inline_data;
- } u;
-
-};
-
-#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
-#define WC_IMM_DATA EHCA_BMASK_IBM(1, 1)
-#define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2)
-#define WC_SE_BIT EHCA_BMASK_IBM(3, 3)
-#define WC_STATUS_ERROR_BIT 0x80000000
-#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
-#define WC_STATUS_PURGE_BIT 0x10
-#define WC_SEND_RECEIVE_BIT 0x80
-
-struct ehca_cqe {
- u64 work_request_id;
- u8 optype;
- u8 w_completion_flags;
- u16 reserved1;
- u32 nr_bytes_transferred;
- u32 immediate_data;
- u32 local_qp_number;
- u8 freed_resource_count;
- u8 service_level;
- u16 wqe_count;
- u32 qp_token;
- u32 qkey_ee_token;
- u32 remote_qp_number;
- u16 dlid;
- u16 rlid;
- u16 reserved2;
- u16 pkey_index;
- u32 cqe_timestamp;
- u32 wqe_timestamp;
- u8 wqe_timestamp_valid;
- u8 reserved3;
- u8 reserved4;
- u8 cqe_flags;
- u32 status;
-};
-
-struct ehca_eqe {
- u64 entry;
-};
-
-struct ehca_mrte {
- u64 starting_va;
- u64 length; /* length of memory region in bytes*/
- u32 pd;
- u8 key_instance;
- u8 pagesize;
- u8 mr_control;
- u8 local_remote_access_ctrl;
- u8 reserved[0x20 - 0x18];
- u64 at_pointer[4];
-};
-#endif /*_EHCA_QES_H_*/
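The WC_* flags above and the MQPCB_MASK_* masks used throughout ehca_qp.c below are built from the EHCA_BMASK_IBM()/EHCA_BMASK_SET()/EHCA_BMASK_GET() helpers, which are defined elsewhere in the driver (ehca_tools.h) and, as the name suggests, use IBM-style bit numbering: bit 0 is the most significant bit of the 64-bit word. The following sketch illustrates that convention with hypothetical helper names; it is not the driver's actual macro implementation, which packs the field description into a single mask value.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: MSB-0 ("IBM") bit-field helpers written with
 * explicit (from, to) arguments.  Bit 0 is the most significant bit,
 * so a field covering bits [from, to] sits (63 - to) positions above
 * the least significant bit of the 64-bit word.
 */
#define BMASK_SHIFT(from, to)	(63 - (to))
#define BMASK_MASK(from, to)	(~0ULL >> (64 - ((to) - (from) + 1)))
#define BMASK_SET(from, to, v) \
	(((uint64_t)(v) & BMASK_MASK(from, to)) << BMASK_SHIFT(from, to))
#define BMASK_GET(from, to, w) \
	(((uint64_t)(w) >> BMASK_SHIFT(from, to)) & BMASK_MASK(from, to))

int main(void)
{
	/* a single-bit flag at IBM bit 2, analogous to WC_GRH_PRESENT above */
	uint64_t word = BMASK_SET(2, 2, 1);

	printf("word=%#llx grh_present=%llu\n",
	       (unsigned long long)word,
	       (unsigned long long)BMASK_GET(2, 2, word));
	return 0;
}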
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
deleted file mode 100644
index 896c01f810f6..000000000000
--- a/drivers/staging/rdma/ehca/ehca_qp.c
+++ /dev/null
@@ -1,2256 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * QP functions
- *
- * Authors: Joachim Fenkes <fenkes@de.ibm.com>
- * Stefan Roscher <stefan.roscher@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-static struct kmem_cache *qp_cache;
-
-/*
- * attributes not supported by query qp
- */
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
- IB_QP_EN_SQD_ASYNC_NOTIFY)
-
-/*
- * ehca (internal) qp state values
- */
-enum ehca_qp_state {
- EHCA_QPS_RESET = 1,
- EHCA_QPS_INIT = 2,
- EHCA_QPS_RTR = 3,
- EHCA_QPS_RTS = 5,
- EHCA_QPS_SQD = 6,
- EHCA_QPS_SQE = 8,
- EHCA_QPS_ERR = 128
-};
-
-/*
- * qp state transitions as defined by IB Arch Rel 1.1 page 431
- */
-enum ib_qp_statetrans {
- IB_QPST_ANY2RESET,
- IB_QPST_ANY2ERR,
- IB_QPST_RESET2INIT,
- IB_QPST_INIT2RTR,
- IB_QPST_INIT2INIT,
- IB_QPST_RTR2RTS,
- IB_QPST_RTS2SQD,
- IB_QPST_RTS2RTS,
- IB_QPST_SQD2RTS,
- IB_QPST_SQE2RTS,
- IB_QPST_SQD2SQD,
- IB_QPST_MAX /* nr of transitions, this must be last!!! */
-};
-
-/*
- * ib2ehca_qp_state maps IB to ehca qp_state
- * returns ehca qp state corresponding to given ib qp state
- */
-static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
-{
- switch (ib_qp_state) {
- case IB_QPS_RESET:
- return EHCA_QPS_RESET;
- case IB_QPS_INIT:
- return EHCA_QPS_INIT;
- case IB_QPS_RTR:
- return EHCA_QPS_RTR;
- case IB_QPS_RTS:
- return EHCA_QPS_RTS;
- case IB_QPS_SQD:
- return EHCA_QPS_SQD;
- case IB_QPS_SQE:
- return EHCA_QPS_SQE;
- case IB_QPS_ERR:
- return EHCA_QPS_ERR;
- default:
- ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
- return -EINVAL;
- }
-}
-
-/*
- * ehca2ib_qp_state maps ehca to IB qp_state
- * returns ib qp state corresponding to given ehca qp state
- */
-static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
- ehca_qp_state)
-{
- switch (ehca_qp_state) {
- case EHCA_QPS_RESET:
- return IB_QPS_RESET;
- case EHCA_QPS_INIT:
- return IB_QPS_INIT;
- case EHCA_QPS_RTR:
- return IB_QPS_RTR;
- case EHCA_QPS_RTS:
- return IB_QPS_RTS;
- case EHCA_QPS_SQD:
- return IB_QPS_SQD;
- case EHCA_QPS_SQE:
- return IB_QPS_SQE;
- case EHCA_QPS_ERR:
- return IB_QPS_ERR;
- default:
- ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
- return -EINVAL;
- }
-}
-
-/*
- * ehca_qp_type used as index for req_attr and opt_attr of
- * struct ehca_modqp_statetrans
- */
-enum ehca_qp_type {
- QPT_RC = 0,
- QPT_UC = 1,
- QPT_UD = 2,
- QPT_SQP = 3,
- QPT_MAX
-};
-
-/*
- * ib2ehcaqptype maps IB to ehca qp_type
- * returns ehca qp type corresponding to ib qp type
- */
-static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
-{
- switch (ibqptype) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- return QPT_SQP;
- case IB_QPT_RC:
- return QPT_RC;
- case IB_QPT_UC:
- return QPT_UC;
- case IB_QPT_UD:
- return QPT_UD;
- default:
- ehca_gen_err("Invalid ibqptype=%x", ibqptype);
- return -EINVAL;
- }
-}
-
-static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
- int ib_tostate)
-{
- int index = -EINVAL;
- switch (ib_tostate) {
- case IB_QPS_RESET:
- index = IB_QPST_ANY2RESET;
- break;
- case IB_QPS_INIT:
- switch (ib_fromstate) {
- case IB_QPS_RESET:
- index = IB_QPST_RESET2INIT;
- break;
- case IB_QPS_INIT:
- index = IB_QPST_INIT2INIT;
- break;
- }
- break;
- case IB_QPS_RTR:
- if (ib_fromstate == IB_QPS_INIT)
- index = IB_QPST_INIT2RTR;
- break;
- case IB_QPS_RTS:
- switch (ib_fromstate) {
- case IB_QPS_RTR:
- index = IB_QPST_RTR2RTS;
- break;
- case IB_QPS_RTS:
- index = IB_QPST_RTS2RTS;
- break;
- case IB_QPS_SQD:
- index = IB_QPST_SQD2RTS;
- break;
- case IB_QPS_SQE:
- index = IB_QPST_SQE2RTS;
- break;
- }
- break;
- case IB_QPS_SQD:
- if (ib_fromstate == IB_QPS_RTS)
- index = IB_QPST_RTS2SQD;
- break;
- case IB_QPS_SQE:
- break;
- case IB_QPS_ERR:
- index = IB_QPST_ANY2ERR;
- break;
- default:
- break;
- }
- return index;
-}
-
-/*
- * ibqptype2servicetype returns hcp service type corresponding to given
- * ib qp type used by create_qp()
- */
-static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
-{
- switch (ibqptype) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- return ST_UD;
- case IB_QPT_RC:
- return ST_RC;
- case IB_QPT_UC:
- return ST_UC;
- case IB_QPT_UD:
- return ST_UD;
- case IB_QPT_RAW_IPV6:
- return -EINVAL;
- case IB_QPT_RAW_ETHERTYPE:
- return -EINVAL;
- default:
- ehca_gen_err("Invalid ibqptype=%x", ibqptype);
- return -EINVAL;
- }
-}
-
-/*
- * init userspace queue info from ipz_queue data
- */
-static inline void queue2resp(struct ipzu_queue_resp *resp,
- struct ipz_queue *queue)
-{
- resp->qe_size = queue->qe_size;
- resp->act_nr_of_sg = queue->act_nr_of_sg;
- resp->queue_length = queue->queue_length;
- resp->pagesize = queue->pagesize;
- resp->toggle_state = queue->toggle_state;
- resp->offset = queue->offset;
-}
-
-/*
- * init_qp_queue initializes/constructs r/squeue and registers queue pages.
- */
-static inline int init_qp_queue(struct ehca_shca *shca,
- struct ehca_pd *pd,
- struct ehca_qp *my_qp,
- struct ipz_queue *queue,
- int q_type,
- u64 expected_hret,
- struct ehca_alloc_queue_parms *parms,
- int wqe_size)
-{
- int ret, cnt, ipz_rc, nr_q_pages;
- void *vpage;
- u64 rpage, h_ret;
- struct ib_device *ib_dev = &shca->ib_device;
- struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
-
- if (!parms->queue_size)
- return 0;
-
- if (parms->is_small) {
- nr_q_pages = 1;
- ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
- 128 << parms->page_size,
- wqe_size, parms->act_nr_sges, 1);
- } else {
- nr_q_pages = parms->queue_size;
- ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
- EHCA_PAGESIZE, wqe_size,
- parms->act_nr_sges, 0);
- }
-
- if (!ipz_rc) {
- ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
- ipz_rc);
- return -EBUSY;
- }
-
- /* register queue pages */
- for (cnt = 0; cnt < nr_q_pages; cnt++) {
- vpage = ipz_qpageit_get_inc(queue);
- if (!vpage) {
- ehca_err(ib_dev, "ipz_qpageit_get_inc() "
- "failed p_vpage= %p", vpage);
- ret = -EINVAL;
- goto init_qp_queue1;
- }
- rpage = __pa(vpage);
-
- h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
- my_qp->ipz_qp_handle,
- NULL, 0, q_type,
- rpage, parms->is_small ? 0 : 1,
- my_qp->galpas.kernel);
- if (cnt == (nr_q_pages - 1)) { /* last page! */
- if (h_ret != expected_hret) {
- ehca_err(ib_dev, "hipz_qp_register_rpage() "
- "h_ret=%lli", h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto init_qp_queue1;
- }
- vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
- if (vpage) {
- ehca_err(ib_dev, "ipz_qpageit_get_inc() "
- "should not succeed vpage=%p", vpage);
- ret = -EINVAL;
- goto init_qp_queue1;
- }
- } else {
- if (h_ret != H_PAGE_REGISTERED) {
- ehca_err(ib_dev, "hipz_qp_register_rpage() "
- "h_ret=%lli", h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto init_qp_queue1;
- }
- }
- }
-
- ipz_qeit_reset(queue);
-
- return 0;
-
-init_qp_queue1:
- ipz_queue_dtor(pd, queue);
- return ret;
-}
-
-static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
-{
- if (is_llqp)
- return 128 << act_nr_sge;
- else
- return offsetof(struct ehca_wqe,
- u.nud.sg_list[act_nr_sge]);
-}
-
-static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
- int req_nr_sge, int is_llqp)
-{
- u32 wqe_size, q_size;
- int act_nr_sge = req_nr_sge;
-
- if (!is_llqp)
- /* round up #SGEs so WQE size is a power of 2 */
- for (act_nr_sge = 4; act_nr_sge <= 252;
- act_nr_sge = 4 + 2 * act_nr_sge)
- if (act_nr_sge >= req_nr_sge)
- break;
-
- wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
- q_size = wqe_size * (queue->max_wr + 1);
-
- if (q_size <= 512)
- queue->page_size = 2;
- else if (q_size <= 1024)
- queue->page_size = 3;
- else
- queue->page_size = 0;
-
- queue->is_small = (queue->page_size != 0);
-}
-
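The rounding above is easier to follow with concrete numbers: the loop walks act_nr_sge through 4, 12, 28, 60, 124 and 252 and stops at the first value that covers the request. The standalone sketch below prints the resulting WQE sizes; the 64-byte header up to u.nud.sg_list follows from the struct ehca_wqe layout in ehca_qes.h above, while the 16-byte size of struct ehca_vsgentry is an assumption, since that struct is defined in a part of the header not shown here.

#include <stdio.h>

/*
 * Worked example for ehca_determine_small_queue(): assuming a 64-byte
 * header up to u.nud.sg_list and 16-byte scatter/gather entries (see
 * the note above), every candidate SGE count yields a power-of-two
 * WQE size.
 */
int main(void)
{
	int act_nr_sge;

	for (act_nr_sge = 4; act_nr_sge <= 252;
	     act_nr_sge = 4 + 2 * act_nr_sge)
		printf("act_nr_sge=%3d -> wqe_size=%4d\n",
		       act_nr_sge, 64 + 16 * act_nr_sge);
	return 0;
}

The page_size values 2 and 3 chosen above then select 512-byte (128 << 2) and 1024-byte (128 << 3) small-queue pages, which matches the "128 << parms->page_size" passed to ipz_queue_ctor() in init_qp_queue().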
-/* needs to be called with cq->spinlock held */
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
-{
- struct list_head *list, *node;
-
- /* TODO: support low latency QPs */
- if (qp->ext_type == EQPT_LLQP)
- return;
-
- if (on_sq) {
- list = &qp->send_cq->sqp_err_list;
- node = &qp->sq_err_node;
- } else {
- list = &qp->recv_cq->rqp_err_list;
- node = &qp->rq_err_node;
- }
-
- if (list_empty(node))
- list_add_tail(node, list);
-
- return;
-}
-
-static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->spinlock, flags);
-
- if (!list_empty(node))
- list_del_init(node);
-
- spin_unlock_irqrestore(&cq->spinlock, flags);
-}
-
-static void reset_queue_map(struct ehca_queue_map *qmap)
-{
- int i;
-
- qmap->tail = qmap->entries - 1;
- qmap->left_to_poll = 0;
- qmap->next_wqe_idx = 0;
- for (i = 0; i < qmap->entries; i++) {
- qmap->map[i].reported = 1;
- qmap->map[i].cqe_req = 0;
- }
-}
-
-/*
- * Create an ib_qp struct that is either a QP or an SRQ, depending on
- * the value of the is_srq parameter. If init_attr and srq_init_attr share
- * fields, the field out of init_attr is used.
- */
-static struct ehca_qp *internal_create_qp(
- struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata, int is_srq)
-{
- struct ehca_qp *my_qp, *my_srq = NULL;
- struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
- ib_device);
- struct ib_ucontext *context = NULL;
- u64 h_ret;
- int is_llqp = 0, has_srq = 0, is_user = 0;
- int qp_type, max_send_sge, max_recv_sge, ret;
-
- /* h_call's out parameters */
- struct ehca_alloc_qp_parms parms;
- u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
- unsigned long flags;
-
- if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
- ehca_err(pd->device, "Unable to create QP, max number of %i "
- "QPs reached.", shca->max_num_qps);
- ehca_err(pd->device, "To increase the maximum number of QPs "
- "use the number_of_qps module parameter.\n");
- return ERR_PTR(-ENOSPC);
- }
-
- if (init_attr->create_flags) {
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
-
- memset(&parms, 0, sizeof(parms));
- qp_type = init_attr->qp_type;
-
- if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
- init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
- ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
- init_attr->sq_sig_type);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
-
- /* save LLQP info */
- if (qp_type & 0x80) {
- is_llqp = 1;
- parms.ext_type = EQPT_LLQP;
- parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
- }
- qp_type &= 0x1F;
- init_attr->qp_type &= 0x1F;
-
- /* handle SRQ base QPs */
- if (init_attr->srq) {
- my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
-
- if (qp_type == IB_QPT_UC) {
- ehca_err(pd->device, "UC with SRQ not supported");
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
-
- has_srq = 1;
- parms.ext_type = EQPT_SRQBASE;
- parms.srq_qpn = my_srq->real_qp_num;
- }
-
- if (is_llqp && has_srq) {
- ehca_err(pd->device, "LLQPs can't have an SRQ");
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
-
- /* handle SRQs */
- if (is_srq) {
- parms.ext_type = EQPT_SRQ;
- parms.srq_limit = srq_init_attr->attr.srq_limit;
- if (init_attr->cap.max_recv_sge > 3) {
- ehca_err(pd->device, "no more than three SGEs "
- "supported for SRQ pd=%p max_sge=%x",
- pd, init_attr->cap.max_recv_sge);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
- }
-
- /* check QP type */
- if (qp_type != IB_QPT_UD &&
- qp_type != IB_QPT_UC &&
- qp_type != IB_QPT_RC &&
- qp_type != IB_QPT_SMI &&
- qp_type != IB_QPT_GSI) {
- ehca_err(pd->device, "wrong QP Type=%x", qp_type);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
-
- if (is_llqp) {
- switch (qp_type) {
- case IB_QPT_RC:
- if ((init_attr->cap.max_send_wr > 255) ||
- (init_attr->cap.max_recv_wr > 255)) {
- ehca_err(pd->device,
- "Invalid Number of max_sq_wr=%x "
- "or max_rq_wr=%x for RC LLQP",
- init_attr->cap.max_send_wr,
- init_attr->cap.max_recv_wr);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
- break;
- case IB_QPT_UD:
- if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
- ehca_err(pd->device, "UD LLQP not supported "
- "by this adapter");
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-ENOSYS);
- }
- if (!(init_attr->cap.max_send_sge <= 5
- && init_attr->cap.max_send_sge >= 1
- && init_attr->cap.max_recv_sge <= 5
- && init_attr->cap.max_recv_sge >= 1)) {
- ehca_err(pd->device,
- "Invalid Number of max_send_sge=%x "
- "or max_recv_sge=%x for UD LLQP",
- init_attr->cap.max_send_sge,
- init_attr->cap.max_recv_sge);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- } else if (init_attr->cap.max_send_wr > 255) {
- ehca_err(pd->device,
- "Invalid Number of "
- "max_send_wr=%x for UD QP_TYPE=%x",
- init_attr->cap.max_send_wr, qp_type);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
- break;
- default:
- ehca_err(pd->device, "unsupported LL QP Type=%x",
- qp_type);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
- } else {
- int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
- || qp_type == IB_QPT_GSI) ? 250 : 252;
-
- if (init_attr->cap.max_send_sge > max_sge
- || init_attr->cap.max_recv_sge > max_sge) {
- ehca_err(pd->device, "Invalid number of SGEs requested "
- "send_sge=%x recv_sge=%x max_sge=%x",
- init_attr->cap.max_send_sge,
- init_attr->cap.max_recv_sge, max_sge);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-EINVAL);
- }
- }
-
- my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
- if (!my_qp) {
- ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(-ENOMEM);
- }
-
- if (pd->uobject && udata) {
- is_user = 1;
- context = pd->uobject->context;
- }
-
- atomic_set(&my_qp->nr_events, 0);
- init_waitqueue_head(&my_qp->wait_completion);
- spin_lock_init(&my_qp->spinlock_s);
- spin_lock_init(&my_qp->spinlock_r);
- my_qp->qp_type = qp_type;
- my_qp->ext_type = parms.ext_type;
- my_qp->state = IB_QPS_RESET;
-
- if (init_attr->recv_cq)
- my_qp->recv_cq =
- container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
- if (init_attr->send_cq)
- my_qp->send_cq =
- container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
-
- idr_preload(GFP_KERNEL);
- write_lock_irqsave(&ehca_qp_idr_lock, flags);
-
- ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
- if (ret >= 0)
- my_qp->token = ret;
-
- write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
- idr_preload_end();
- if (ret < 0) {
- if (ret == -ENOSPC) {
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid number of qp");
- } else {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't allocate new idr entry.");
- }
- goto create_qp_exit0;
- }
-
- if (has_srq)
- parms.srq_token = my_qp->token;
-
- parms.servicetype = ibqptype2servicetype(qp_type);
- if (parms.servicetype < 0) {
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
- goto create_qp_exit1;
- }
-
- /* Always signal by WQE so we can hide circ. WQEs */
- parms.sigtype = HCALL_SIGT_BY_WQE;
-
- /* UD_AV CIRCUMVENTION */
- max_send_sge = init_attr->cap.max_send_sge;
- max_recv_sge = init_attr->cap.max_recv_sge;
- if (parms.servicetype == ST_UD && !is_llqp) {
- max_send_sge += 2;
- max_recv_sge += 2;
- }
-
- parms.token = my_qp->token;
- parms.eq_handle = shca->eq.ipz_eq_handle;
- parms.pd = my_pd->fw_pd;
- if (my_qp->send_cq)
- parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
- if (my_qp->recv_cq)
- parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
-
- parms.squeue.max_wr = init_attr->cap.max_send_wr;
- parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
- parms.squeue.max_sge = max_send_sge;
- parms.rqueue.max_sge = max_recv_sge;
-
- /* RC QPs need one more SWQE for unsolicited ack circumvention */
- if (qp_type == IB_QPT_RC)
- parms.squeue.max_wr++;
-
- if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
- if (HAS_SQ(my_qp))
- ehca_determine_small_queue(
- &parms.squeue, max_send_sge, is_llqp);
- if (HAS_RQ(my_qp))
- ehca_determine_small_queue(
- &parms.rqueue, max_recv_sge, is_llqp);
- parms.qp_storage =
- (parms.squeue.is_small || parms.rqueue.is_small);
- }
-
- h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
- if (h_ret != H_SUCCESS) {
- ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
- h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto create_qp_exit1;
- }
-
- ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
- my_qp->ipz_qp_handle = parms.qp_handle;
- my_qp->galpas = parms.galpas;
-
- swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
- rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
-
- switch (qp_type) {
- case IB_QPT_RC:
- if (is_llqp) {
- parms.squeue.act_nr_sges = 1;
- parms.rqueue.act_nr_sges = 1;
- }
- /* hide the extra WQE */
- parms.squeue.act_nr_wqes--;
- break;
- case IB_QPT_UD:
- case IB_QPT_GSI:
- case IB_QPT_SMI:
- /* UD circumvention */
- if (is_llqp) {
- parms.squeue.act_nr_sges = 1;
- parms.rqueue.act_nr_sges = 1;
- } else {
- parms.squeue.act_nr_sges -= 2;
- parms.rqueue.act_nr_sges -= 2;
- }
-
- if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
- parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
- parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
- parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
- parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
- ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
- }
-
- break;
-
- default:
- break;
- }
-
- /* initialize r/squeue and register queue pages */
- if (HAS_SQ(my_qp)) {
- ret = init_qp_queue(
- shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
- HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
- &parms.squeue, swqe_size);
- if (ret) {
- ehca_err(pd->device, "Couldn't initialize squeue "
- "and pages ret=%i", ret);
- goto create_qp_exit2;
- }
-
- if (!is_user) {
- my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
- my_qp->ipz_squeue.qe_size;
- my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
- sizeof(struct ehca_qmap_entry));
- if (!my_qp->sq_map.map) {
- ehca_err(pd->device, "Couldn't allocate squeue "
- "map ret=%i", ret);
- goto create_qp_exit3;
- }
- INIT_LIST_HEAD(&my_qp->sq_err_node);
- /* to avoid the generation of bogus flush CQEs */
- reset_queue_map(&my_qp->sq_map);
- }
- }
-
- if (HAS_RQ(my_qp)) {
- ret = init_qp_queue(
- shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
- H_SUCCESS, &parms.rqueue, rwqe_size);
- if (ret) {
- ehca_err(pd->device, "Couldn't initialize rqueue "
- "and pages ret=%i", ret);
- goto create_qp_exit4;
- }
- if (!is_user) {
- my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
- my_qp->ipz_rqueue.qe_size;
- my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
- sizeof(struct ehca_qmap_entry));
- if (!my_qp->rq_map.map) {
- ehca_err(pd->device, "Couldn't allocate rqueue "
- "map ret=%i", ret);
- goto create_qp_exit5;
- }
- INIT_LIST_HEAD(&my_qp->rq_err_node);
- /* to avoid the generation of bogus flush CQEs */
- reset_queue_map(&my_qp->rq_map);
- }
- } else if (init_attr->srq && !is_user) {
- /* this is a base QP, use the queue map of the SRQ */
- my_qp->rq_map = my_srq->rq_map;
- INIT_LIST_HEAD(&my_qp->rq_err_node);
-
- my_qp->ipz_rqueue = my_srq->ipz_rqueue;
- }
-
- if (is_srq) {
- my_qp->ib_srq.pd = &my_pd->ib_pd;
- my_qp->ib_srq.device = my_pd->ib_pd.device;
-
- my_qp->ib_srq.srq_context = init_attr->qp_context;
- my_qp->ib_srq.event_handler = init_attr->event_handler;
- } else {
- my_qp->ib_qp.qp_num = ib_qp_num;
- my_qp->ib_qp.pd = &my_pd->ib_pd;
- my_qp->ib_qp.device = my_pd->ib_pd.device;
-
- my_qp->ib_qp.recv_cq = init_attr->recv_cq;
- my_qp->ib_qp.send_cq = init_attr->send_cq;
-
- my_qp->ib_qp.qp_type = qp_type;
- my_qp->ib_qp.srq = init_attr->srq;
-
- my_qp->ib_qp.qp_context = init_attr->qp_context;
- my_qp->ib_qp.event_handler = init_attr->event_handler;
- }
-
- init_attr->cap.max_inline_data = 0; /* not supported yet */
- init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
- init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
- init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
- init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
- my_qp->init_attr = *init_attr;
-
- if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
- shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
- &my_qp->ib_qp;
- if (ehca_nr_ports < 0) {
- /* alloc array to cache subsequent modify qp parms
- * for autodetect mode
- */
- my_qp->mod_qp_parm =
- kzalloc(EHCA_MOD_QP_PARM_MAX *
- sizeof(*my_qp->mod_qp_parm),
- GFP_KERNEL);
- if (!my_qp->mod_qp_parm) {
- ehca_err(pd->device,
- "Could not alloc mod_qp_parm");
- goto create_qp_exit5;
- }
- }
- }
-
- /* NOTE: define_aqp0() not supported yet */
- if (qp_type == IB_QPT_GSI) {
- h_ret = ehca_define_sqp(shca, my_qp, init_attr);
- if (h_ret != H_SUCCESS) {
- kfree(my_qp->mod_qp_parm);
- my_qp->mod_qp_parm = NULL;
- /* the QP pointer is no longer valid */
- shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
- NULL;
- ret = ehca2ib_return_code(h_ret);
- goto create_qp_exit6;
- }
- }
-
- if (my_qp->send_cq) {
- ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
- if (ret) {
- ehca_err(pd->device,
- "Couldn't assign qp to send_cq ret=%i", ret);
- goto create_qp_exit7;
- }
- }
-
- /* copy queues, galpa data to user space */
- if (context && udata) {
- struct ehca_create_qp_resp resp;
- memset(&resp, 0, sizeof(resp));
-
- resp.qp_num = my_qp->real_qp_num;
- resp.token = my_qp->token;
- resp.qp_type = my_qp->qp_type;
- resp.ext_type = my_qp->ext_type;
- resp.qkey = my_qp->qkey;
- resp.real_qp_num = my_qp->real_qp_num;
-
- if (HAS_SQ(my_qp))
- queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
- if (HAS_RQ(my_qp))
- queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
- resp.fw_handle_ofs = (u32)
- (my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
-
- if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
- ehca_err(pd->device, "Copy to udata failed");
- ret = -EINVAL;
- goto create_qp_exit8;
- }
- }
-
- return my_qp;
-
-create_qp_exit8:
- ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-
-create_qp_exit7:
- kfree(my_qp->mod_qp_parm);
-
-create_qp_exit6:
- if (HAS_RQ(my_qp) && !is_user)
- vfree(my_qp->rq_map.map);
-
-create_qp_exit5:
- if (HAS_RQ(my_qp))
- ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-create_qp_exit4:
- if (HAS_SQ(my_qp) && !is_user)
- vfree(my_qp->sq_map.map);
-
-create_qp_exit3:
- if (HAS_SQ(my_qp))
- ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-create_qp_exit2:
- hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-
-create_qp_exit1:
- write_lock_irqsave(&ehca_qp_idr_lock, flags);
- idr_remove(&ehca_qp_idr, my_qp->token);
- write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-create_qp_exit0:
- kmem_cache_free(qp_cache, my_qp);
- atomic_dec(&shca->num_qps);
- return ERR_PTR(ret);
-}
-
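For reference, the QP token allocation in internal_create_qp() above follows the standard two-phase idr_preload()/idr_alloc() pattern: preallocate with GFP_KERNEL outside the lock, then allocate the id atomically under the lock with GFP_NOWAIT. A stripped-down sketch is shown below; the names are placeholders, and a plain spinlock stands in for the driver's irq-safe rwlock purely to keep the example short.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

/* placeholder names for illustration only, not taken from this driver */
static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int alloc_token(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep: preallocates idr memory */
	spin_lock(&my_lock);
	/* ids are handed out in [0, 0x2000000), as for the QP tokens above */
	id = idr_alloc(&my_idr, obj, 0, 0x2000000, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* >= 0 on success, -ENOSPC or -ENOMEM on failure */
}

On teardown the id is released again with idr_remove(), which is exactly what the create_qp_exit1 label above does with the token.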
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
-{
- struct ehca_qp *ret;
-
- ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
- return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
- struct ib_uobject *uobject);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
-{
- struct ib_qp_init_attr qp_init_attr;
- struct ehca_qp *my_qp;
- struct ib_srq *ret;
- struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
- ib_device);
- struct hcp_modify_qp_control_block *mqpcb;
- u64 hret, update_mask;
-
- if (srq_init_attr->srq_type != IB_SRQT_BASIC)
- return ERR_PTR(-ENOSYS);
-
- /* For common attributes, internal_create_qp() takes its info
- * out of qp_init_attr, so copy all common attrs there.
- */
- memset(&qp_init_attr, 0, sizeof(qp_init_attr));
- qp_init_attr.event_handler = srq_init_attr->event_handler;
- qp_init_attr.qp_context = srq_init_attr->srq_context;
- qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.qp_type = IB_QPT_RC;
- qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
- qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
-
- my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
- if (IS_ERR(my_qp))
- return (struct ib_srq *)my_qp;
-
- /* copy back return values */
- srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
- srq_init_attr->attr.max_sge = 3;
-
- /* drive SRQ into RTR state */
- mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!mqpcb) {
- ehca_err(pd->device, "Could not get zeroed page for mqpcb "
- "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
- ret = ERR_PTR(-ENOMEM);
- goto create_srq1;
- }
-
- mqpcb->qp_state = EHCA_QPS_INIT;
- mqpcb->prim_phys_port = 1;
- update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
- hret = hipz_h_modify_qp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- update_mask,
- mqpcb, my_qp->galpas.kernel);
- if (hret != H_SUCCESS) {
- ehca_err(pd->device, "Could not modify SRQ to INIT "
- "ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, my_qp->real_qp_num, hret);
- goto create_srq2;
- }
-
- mqpcb->qp_enable = 1;
- update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
- hret = hipz_h_modify_qp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- update_mask,
- mqpcb, my_qp->galpas.kernel);
- if (hret != H_SUCCESS) {
- ehca_err(pd->device, "Could not enable SRQ "
- "ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, my_qp->real_qp_num, hret);
- goto create_srq2;
- }
-
- mqpcb->qp_state = EHCA_QPS_RTR;
- update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
- hret = hipz_h_modify_qp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- update_mask,
- mqpcb, my_qp->galpas.kernel);
- if (hret != H_SUCCESS) {
- ehca_err(pd->device, "Could not modify SRQ to RTR "
- "ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, my_qp->real_qp_num, hret);
- goto create_srq2;
- }
-
- ehca_free_fw_ctrlblock(mqpcb);
-
- return &my_qp->ib_srq;
-
-create_srq2:
- ret = ERR_PTR(ehca2ib_return_code(hret));
- ehca_free_fw_ctrlblock(mqpcb);
-
-create_srq1:
- internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
-
- return ret;
-}
-
-/*
- * prepare_sqe_rts is called by internal_modify_qp() at the sqe -> rts
- * transition. It sets the purge bit of the bad wqe and of all subsequent
- * wqes to avoid re-entering sqe, and returns the total number of bad wqes
- * in bad_wqe_cnt.
- */
-static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
- int *bad_wqe_cnt)
-{
- u64 h_ret;
- struct ipz_queue *squeue;
- void *bad_send_wqe_p, *bad_send_wqe_v;
- u64 q_ofs;
- struct ehca_wqe *wqe;
- int qp_num = my_qp->ib_qp.qp_num;
-
- /* get send wqe pointer */
- h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle, &my_qp->pf,
- &bad_send_wqe_p, NULL, 2);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
- " ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, qp_num, h_ret);
- return ehca2ib_return_code(h_ret);
- }
- bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
- ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
- qp_num, bad_send_wqe_p);
- /* convert wqe pointer to vaddr */
- bad_send_wqe_v = __va((u64)bad_send_wqe_p);
- if (ehca_debug_level >= 2)
- ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
- squeue = &my_qp->ipz_squeue;
- if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
- ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
- " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
- return -EFAULT;
- }
-
- /* loop sets wqe's purge bit */
- wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
- *bad_wqe_cnt = 0;
- while (wqe->optype != 0xff && wqe->wqef != 0xff) {
- if (ehca_debug_level >= 2)
- ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
- wqe->nr_of_data_seg = 0; /* suppress data access */
- wqe->wqef = WQEF_PURGE; /* WQE to be purged */
- q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
- wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
- *bad_wqe_cnt = (*bad_wqe_cnt)+1;
- }
- /*
- * the bad wqe will be reprocessed and ignored when poll_cq() is called,
- * i.e. nr of wqes with flush error status is one less
- */
- ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
- qp_num, (*bad_wqe_cnt)-1);
- wqe->wqef = 0;
-
- return 0;
-}
-
-static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
- struct ehca_queue_map *qmap)
-{
- void *wqe_v;
- u64 q_ofs;
- u32 wqe_idx;
- unsigned int tail_idx;
-
- /* convert real to abs address */
- wqe_p = wqe_p & (~(1UL << 63));
-
- wqe_v = __va(wqe_p);
-
- if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
- ehca_gen_err("Invalid offset for calculating left cqes "
- "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
- return -EFAULT;
- }
-
- tail_idx = next_index(qmap->tail, qmap->entries);
- wqe_idx = q_ofs / ipz_queue->qe_size;
-
- /* check all processed wqes, whether a cqe is requested or not */
- while (tail_idx != wqe_idx) {
- if (qmap->map[tail_idx].cqe_req)
- qmap->left_to_poll++;
- tail_idx = next_index(tail_idx, qmap->entries);
- }
- /* save index in queue, where we have to start flushing */
- qmap->next_wqe_idx = wqe_idx;
- return 0;
-}
-
-static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
-{
- u64 h_ret;
- void *send_wqe_p, *recv_wqe_p;
- int ret;
- unsigned long flags;
- int qp_num = my_qp->ib_qp.qp_num;
-
- /* this hcall is not supported on base QPs */
- if (my_qp->ext_type != EQPT_SRQBASE) {
- /* get send and receive wqe pointer */
- h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle, &my_qp->pf,
- &send_wqe_p, &recv_wqe_p, 4);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "disable_and_get_wqe() "
- "failed ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, qp_num, h_ret);
- return ehca2ib_return_code(h_ret);
- }
-
- /*
- * acquire lock to ensure that nobody is polling the cq which
- * could mean that the qmap->tail pointer is in an
- * inconsistent state.
- */
- spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
- ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
- &my_qp->sq_map);
- spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
- if (ret)
- return ret;
-
-
- spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
- ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
- &my_qp->rq_map);
- spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
- if (ret)
- return ret;
- } else {
- spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
- my_qp->sq_map.left_to_poll = 0;
- my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
- my_qp->sq_map.entries);
- spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
- spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
- my_qp->rq_map.left_to_poll = 0;
- my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
- my_qp->rq_map.entries);
- spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
- }
-
- /* this ensures flush cqes are generated only for pending wqes */
- if ((my_qp->sq_map.left_to_poll == 0) &&
- (my_qp->rq_map.left_to_poll == 0)) {
- spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
- ehca_add_to_err_list(my_qp, 1);
- spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
- if (HAS_RQ(my_qp)) {
- spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
- ehca_add_to_err_list(my_qp, 0);
- spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
- flags);
- }
- }
-
- return 0;
-}
-
-/*
- * internal_modify_qp with circumvention to handle aqp0 properly
- * smi_reset2init indicates if this is an internal reset-to-init call for
- * SMI. This flag must always be zero if called from ehca_modify_qp()!
- * This internal function was introduced to avoid recursion in ehca_modify_qp()!
- */
-static int internal_modify_qp(struct ib_qp *ibqp,
- struct ib_qp_attr *attr,
- int attr_mask, int smi_reset2init)
-{
- enum ib_qp_state qp_cur_state, qp_new_state;
- int cnt, qp_attr_idx, ret = 0;
- enum ib_qp_statetrans statetrans;
- struct hcp_modify_qp_control_block *mqpcb;
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca =
- container_of(ibqp->pd->device, struct ehca_shca, ib_device);
- u64 update_mask;
- u64 h_ret;
- int bad_wqe_cnt = 0;
- int is_user = 0;
- int squeue_locked = 0;
- unsigned long flags = 0;
-
- /* do query_qp to obtain current attr values */
- mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
- if (!mqpcb) {
- ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
- "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- mqpcb, my_qp->galpas.kernel);
- if (h_ret != H_SUCCESS) {
- ehca_err(ibqp->device, "hipz_h_query_qp() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, ibqp->qp_num, h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto modify_qp_exit1;
- }
- if (ibqp->uobject)
- is_user = 1;
-
- qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
-
- if (qp_cur_state == -EINVAL) { /* invalid qp state */
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
- "ehca_qp=%p qp_num=%x",
- mqpcb->qp_state, my_qp, ibqp->qp_num);
- goto modify_qp_exit1;
- }
- /*
- * circumvention to set aqp0 initial state to init
- * as expected by IB spec
- */
- if (smi_reset2init == 0 &&
- ibqp->qp_type == IB_QPT_SMI &&
- qp_cur_state == IB_QPS_RESET &&
- (attr_mask & IB_QP_STATE) &&
- attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
- struct ib_qp_attr smiqp_attr = {
- .qp_state = IB_QPS_INIT,
- .port_num = my_qp->init_attr.port_num,
- .pkey_index = 0,
- .qkey = 0
- };
- int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
- IB_QP_PKEY_INDEX | IB_QP_QKEY;
- int smirc = internal_modify_qp(
- ibqp, &smiqp_attr, smiqp_attr_mask, 1);
- if (smirc) {
- ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
- "ehca_modify_qp() rc=%i", smirc);
- ret = H_PARAMETER;
- goto modify_qp_exit1;
- }
- qp_cur_state = IB_QPS_INIT;
- ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
- }
- /* is the transmitted current state equal to the "real" current state? */
- if ((attr_mask & IB_QP_CUR_STATE) &&
- qp_cur_state != attr->cur_qp_state) {
- ret = -EINVAL;
- ehca_err(ibqp->device,
- "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
- " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
- attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
- goto modify_qp_exit1;
- }
-
- ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
- "new qp_state=%x attribute_mask=%x",
- my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
-
- qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
- if (!smi_reset2init &&
- !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
- ret = -EINVAL;
- ehca_err(ibqp->device,
- "Invalid qp transition new_state=%x cur_state=%x "
- "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
- qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
- goto modify_qp_exit1;
- }
-
- mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
- if (mqpcb->qp_state)
- update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
- else {
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid new qp state=%x "
- "ehca_qp=%p qp_num=%x",
- qp_new_state, my_qp, ibqp->qp_num);
- goto modify_qp_exit1;
- }
-
- /* retrieve state transition struct to get req and opt attrs */
- statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
- if (statetrans < 0) {
- ret = -EINVAL;
- ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
- "new_qp_state=%x State_xsition=%x ehca_qp=%p "
- "qp_num=%x", qp_cur_state, qp_new_state,
- statetrans, my_qp, ibqp->qp_num);
- goto modify_qp_exit1;
- }
-
- qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
-
- if (qp_attr_idx < 0) {
- ret = qp_attr_idx;
- ehca_err(ibqp->device,
- "Invalid QP type=%x ehca_qp=%p qp_num=%x",
- ibqp->qp_type, my_qp, ibqp->qp_num);
- goto modify_qp_exit1;
- }
-
- ehca_dbg(ibqp->device,
- "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
- my_qp, ibqp->qp_num, statetrans);
-
- /* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
- * in non-LL UD QPs.
- */
- if ((my_qp->qp_type == IB_QPT_UD) &&
- (my_qp->ext_type != EQPT_LLQP) &&
- (statetrans == IB_QPST_INIT2RTR) &&
- (shca->hw_level >= 0x22)) {
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
- mqpcb->send_grh_flag = 1;
- }
-
- /* sqe -> rts: set purge bit of bad wqe before actual trans */
- if ((my_qp->qp_type == IB_QPT_UD ||
- my_qp->qp_type == IB_QPT_GSI ||
- my_qp->qp_type == IB_QPT_SMI) &&
- statetrans == IB_QPST_SQE2RTS) {
- /* mark next free wqe if kernel */
- if (!ibqp->uobject) {
- struct ehca_wqe *wqe;
- /* lock send queue */
- spin_lock_irqsave(&my_qp->spinlock_s, flags);
- squeue_locked = 1;
- /* mark next free wqe */
- wqe = (struct ehca_wqe *)
- ipz_qeit_get(&my_qp->ipz_squeue);
- wqe->optype = wqe->wqef = 0xff;
- ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
- ibqp->qp_num, wqe);
- }
- ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
- if (ret) {
- ehca_err(ibqp->device, "prepare_sqe_rts() failed "
- "ehca_qp=%p qp_num=%x ret=%i",
- my_qp, ibqp->qp_num, ret);
- goto modify_qp_exit2;
- }
- }
-
- /*
- * enable RDMA_Atomic_Control if reset->init and reliable connection;
- * this is necessary since gen2 does not provide that flag,
- * but pHyp requires it
- */
- if (statetrans == IB_QPST_RESET2INIT &&
- (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
- mqpcb->rdma_atomic_ctrl = 3;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
- }
- /* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
- if (statetrans == IB_QPST_INIT2RTR &&
- (ibqp->qp_type == IB_QPT_UC) &&
- !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
- mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX) {
- if (attr->pkey_index >= 16) {
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid pkey_index=%x. "
- "ehca_qp=%p qp_num=%x max_pkey_index=f",
- attr->pkey_index, my_qp, ibqp->qp_num);
- goto modify_qp_exit2;
- }
- mqpcb->prim_p_key_idx = attr->pkey_index;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
- }
- if (attr_mask & IB_QP_PORT) {
- struct ehca_sport *sport;
- struct ehca_qp *aqp1;
- if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid port=%x. "
- "ehca_qp=%p qp_num=%x num_ports=%x",
- attr->port_num, my_qp, ibqp->qp_num,
- shca->num_ports);
- goto modify_qp_exit2;
- }
- sport = &shca->sport[attr->port_num - 1];
- if (!sport->ibqp_sqp[IB_QPT_GSI]) {
- /* should not occur */
- ret = -EFAULT;
- ehca_err(ibqp->device, "AQP1 was not created for "
- "port=%x", attr->port_num);
- goto modify_qp_exit2;
- }
- aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
- struct ehca_qp, ib_qp);
- if (ibqp->qp_type != IB_QPT_GSI &&
- ibqp->qp_type != IB_QPT_SMI &&
- aqp1->mod_qp_parm) {
- /*
- * firmware will reject this modify_qp() because
- * port is not activated/initialized fully
- */
- ret = -EFAULT;
- ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
- "either port is being activated (try again) "
- "or cabling issue", attr->port_num);
- goto modify_qp_exit2;
- }
- mqpcb->prim_phys_port = attr->port_num;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
- }
- if (attr_mask & IB_QP_QKEY) {
- mqpcb->qkey = attr->qkey;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
- }
- if (attr_mask & IB_QP_AV) {
- mqpcb->dlid = attr->ah_attr.dlid;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
- mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
- mqpcb->service_level = attr->ah_attr.sl;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
-
- if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
- attr->ah_attr.static_rate,
- &mqpcb->max_static_rate)) {
- ret = -EINVAL;
- goto modify_qp_exit2;
- }
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
-
- /*
- * Always supply the GRH flag, even if it's zero, to give the
- * hypervisor a clear "yes" or "no" instead of a "perhaps"
- */
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-
- /*
- * only if GRH is TRUE may we set SOURCE_GID_IDX and
- * DEST_GID, otherwise pHyp will return H_ATTR_PARM!!!
- */
- if (attr->ah_attr.ah_flags == IB_AH_GRH) {
- mqpcb->send_grh_flag = 1;
-
- mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
-
- for (cnt = 0; cnt < 16; cnt++)
- mqpcb->dest_gid.byte[cnt] =
- attr->ah_attr.grh.dgid.raw[cnt];
-
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
- mqpcb->flow_label = attr->ah_attr.grh.flow_label;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
- mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
- mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
- }
- }
-
- if (attr_mask & IB_QP_PATH_MTU) {
- /* store log2(MTU) */
- my_qp->mtu_shift = attr->path_mtu + 7;
- mqpcb->path_mtu = attr->path_mtu;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
- }
- if (attr_mask & IB_QP_TIMEOUT) {
- mqpcb->timeout = attr->timeout;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
- }
- if (attr_mask & IB_QP_RETRY_CNT) {
- mqpcb->retry_count = attr->retry_cnt;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
- }
- if (attr_mask & IB_QP_RNR_RETRY) {
- mqpcb->rnr_retry_count = attr->rnr_retry;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
- }
- if (attr_mask & IB_QP_RQ_PSN) {
- mqpcb->receive_psn = attr->rq_psn;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
- }
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
- attr->max_dest_rd_atomic : 2;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
- }
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
- attr->max_rd_atomic : 2;
- update_mask |=
- EHCA_BMASK_SET
- (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
- }
- if (attr_mask & IB_QP_ALT_PATH) {
- if (attr->alt_port_num < 1
- || attr->alt_port_num > shca->num_ports) {
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid alt_port=%x. "
- "ehca_qp=%p qp_num=%x num_ports=%x",
- attr->alt_port_num, my_qp, ibqp->qp_num,
- shca->num_ports);
- goto modify_qp_exit2;
- }
- mqpcb->alt_phys_port = attr->alt_port_num;
-
- if (attr->alt_pkey_index >= 16) {
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
- "ehca_qp=%p qp_num=%x max_pkey_index=f",
- attr->pkey_index, my_qp, ibqp->qp_num);
- goto modify_qp_exit2;
- }
- mqpcb->alt_p_key_idx = attr->alt_pkey_index;
-
- mqpcb->timeout_al = attr->alt_timeout;
- mqpcb->dlid_al = attr->alt_ah_attr.dlid;
- mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
- mqpcb->service_level_al = attr->alt_ah_attr.sl;
-
- if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
- attr->alt_ah_attr.static_rate,
- &mqpcb->max_static_rate_al)) {
- ret = -EINVAL;
- goto modify_qp_exit2;
- }
-
- /* OpenIB doesn't support alternate retry counts - copy them */
- mqpcb->retry_count_al = mqpcb->retry_count;
- mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
-
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
-
- /*
- * Always supply the GRH flag, even if it's zero, to give the
- * hypervisor a clear "yes" or "no" instead of a "perhaps"
- */
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
-
- /*
- * only if GRH is TRUE may we set SOURCE_GID_IDX and
- * DEST_GID, otherwise pHyp will return H_ATTR_PARM!!!
- */
- if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
- mqpcb->send_grh_flag_al = 1;
-
- for (cnt = 0; cnt < 16; cnt++)
- mqpcb->dest_gid_al.byte[cnt] =
- attr->alt_ah_attr.grh.dgid.raw[cnt];
- mqpcb->source_gid_idx_al =
- attr->alt_ah_attr.grh.sgid_index;
- mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
- mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
- mqpcb->traffic_class_al =
- attr->alt_ah_attr.grh.traffic_class;
-
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
- EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
- }
- }
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER) {
- mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
- }
-
- if (attr_mask & IB_QP_SQ_PSN) {
- mqpcb->send_psn = attr->sq_psn;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
- }
-
- if (attr_mask & IB_QP_DEST_QPN) {
- mqpcb->dest_qp_nr = attr->dest_qp_num;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- if (attr->path_mig_state != IB_MIG_REARM
- && attr->path_mig_state != IB_MIG_MIGRATED) {
- ret = -EINVAL;
- ehca_err(ibqp->device, "Invalid mig_state=%x",
- attr->path_mig_state);
- goto modify_qp_exit2;
- }
- mqpcb->path_migration_state = attr->path_mig_state + 1;
- if (attr->path_mig_state == IB_MIG_REARM)
- my_qp->mig_armed = 1;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
- }
-
- if (attr_mask & IB_QP_CAP) {
- mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
- mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
- /* no support for max_send/recv_sge yet */
- }
-
- if (ehca_debug_level >= 2)
- ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
-
- h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- update_mask,
- mqpcb, my_qp->galpas.kernel);
-
- if (h_ret != H_SUCCESS) {
- ret = ehca2ib_return_code(h_ret);
- ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
- "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
- goto modify_qp_exit2;
- }
-
- if ((my_qp->qp_type == IB_QPT_UD ||
- my_qp->qp_type == IB_QPT_GSI ||
- my_qp->qp_type == IB_QPT_SMI) &&
- statetrans == IB_QPST_SQE2RTS) {
- /* ring the doorbell to reprocess the wqes */
- iosync(); /* serialize GAL register access */
- hipz_update_sqa(my_qp, bad_wqe_cnt-1);
- ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
- }
-
- if (statetrans == IB_QPST_RESET2INIT ||
- statetrans == IB_QPST_INIT2INIT) {
- mqpcb->qp_enable = 1;
- mqpcb->qp_state = EHCA_QPS_INIT;
- update_mask = 0;
- update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-
- h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- update_mask,
- mqpcb,
- my_qp->galpas.kernel);
-
- if (h_ret != H_SUCCESS) {
- ret = ehca2ib_return_code(h_ret);
- ehca_err(ibqp->device, "ENABLE in context of "
- "RESET_2_INIT failed! Maybe you didn't get "
- "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
- h_ret, my_qp, ibqp->qp_num);
- goto modify_qp_exit2;
- }
- }
- if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
- && !is_user) {
- ret = check_for_left_cqes(my_qp, shca);
- if (ret)
- goto modify_qp_exit2;
- }
-
- if (statetrans == IB_QPST_ANY2RESET) {
- ipz_qeit_reset(&my_qp->ipz_rqueue);
- ipz_qeit_reset(&my_qp->ipz_squeue);
-
- if (qp_cur_state == IB_QPS_ERR && !is_user) {
- del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
- if (HAS_RQ(my_qp))
- del_from_err_list(my_qp->recv_cq,
- &my_qp->rq_err_node);
- }
- if (!is_user)
- reset_queue_map(&my_qp->sq_map);
-
- if (HAS_RQ(my_qp) && !is_user)
- reset_queue_map(&my_qp->rq_map);
- }
-
- if (attr_mask & IB_QP_QKEY)
- my_qp->qkey = attr->qkey;
-
-modify_qp_exit2:
- if (squeue_locked) { /* this means: sqe -> rts */
- spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
- my_qp->sqerr_purgeflag = 1;
- }
-
-modify_qp_exit1:
- ehca_free_fw_ctrlblock(mqpcb);
-
- return ret;
-}
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
- struct ib_udata *udata)
-{
- int ret = 0;
-
- struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
- ib_device);
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
- /* The if-block below caches the qp_attr to be modified for GSI and SMI
- * qps during initialization by ib_mad. When the respective port is
- * activated, i.e. we get a PORT_ACTIVE event, we replay the cached
- * sequence of modify calls, see ehca_recover_sqp() below.
- * Why that is required:
- * 1) If one port is connected, older code requires that port one be
- * connected and the module option nr_ports=1 be given by the user,
- * which is very inconvenient for the end user.
- * 2) Firmware accepts modify_qp() only once the respective port has
- * become active. Older code had a 30 sec wait loop in create_qp()/
- * define_aqp1(), which is not appropriate in practice. This code
- * now removes that wait loop, see define_aqp1(), and always reports
- * all ports to ib_mad and to users. Only activated ports will then
- * be usable for the users.
- */
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
- int port = my_qp->init_attr.port_num;
- struct ehca_sport *sport = &shca->sport[port - 1];
- unsigned long flags;
- spin_lock_irqsave(&sport->mod_sqp_lock, flags);
- /* cache qp_attr only during init */
- if (my_qp->mod_qp_parm) {
- struct ehca_mod_qp_parm *p;
- if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
- ehca_err(&shca->ib_device,
- "mod_qp_parm overflow state=%x port=%x"
- " type=%x", attr->qp_state,
- my_qp->init_attr.port_num,
- ibqp->qp_type);
- spin_unlock_irqrestore(&sport->mod_sqp_lock,
- flags);
- return -EINVAL;
- }
- p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
- p->mask = attr_mask;
- p->attr = *attr;
- my_qp->mod_qp_parm_idx++;
- ehca_dbg(&shca->ib_device,
- "Saved qp_attr for state=%x port=%x type=%x",
- attr->qp_state, my_qp->init_attr.port_num,
- ibqp->qp_type);
- spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
- goto out;
- }
- spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
- }
-
- ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
-
-out:
- if ((ret == 0) && (attr_mask & IB_QP_STATE))
- my_qp->state = attr->qp_state;
-
- return ret;
-}
-
-void ehca_recover_sqp(struct ib_qp *sqp)
-{
- struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
- int port = my_sqp->init_attr.port_num;
- struct ib_qp_attr attr;
- struct ehca_mod_qp_parm *qp_parm;
- int i, qp_parm_idx, ret;
- unsigned long flags, wr_cnt;
-
- if (!my_sqp->mod_qp_parm)
- return;
- ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
-
- qp_parm = my_sqp->mod_qp_parm;
- qp_parm_idx = my_sqp->mod_qp_parm_idx;
- for (i = 0; i < qp_parm_idx; i++) {
- attr = qp_parm[i].attr;
- ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
- if (ret) {
- ehca_err(sqp->device, "Could not modify SQP port=%x "
- "qp_num=%x ret=%x", port, sqp->qp_num, ret);
- goto free_qp_parm;
- }
- ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
- port, sqp->qp_num, attr.qp_state);
- }
-
- /* re-trigger posted recv wrs */
- wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
- my_sqp->ipz_rqueue.qe_size;
- if (wr_cnt) {
- spin_lock_irqsave(&my_sqp->spinlock_r, flags);
- hipz_update_rqa(my_sqp, wr_cnt);
- spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
- ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
- port, sqp->qp_num, wr_cnt);
- }
-
-free_qp_parm:
- kfree(qp_parm);
- /* this prevents subsequent calls to modify_qp() from caching qp_attr */
- my_sqp->mod_qp_parm = NULL;
-}
-
-int ehca_query_qp(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
-{
- struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
- ib_device);
- struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
- struct hcp_modify_qp_control_block *qpcb;
- int cnt, ret = 0;
- u64 h_ret;
-
- if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
- ehca_err(qp->device, "Invalid attribute mask "
- "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
- my_qp, qp->qp_num, qp_attr_mask);
- return -EINVAL;
- }
-
- qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!qpcb) {
- ehca_err(qp->device, "Out of memory for qpcb "
- "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_qp(adapter_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf,
- qpcb, my_qp->galpas.kernel);
-
- if (h_ret != H_SUCCESS) {
- ret = ehca2ib_return_code(h_ret);
- ehca_err(qp->device, "hipz_h_query_qp() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, qp->qp_num, h_ret);
- goto query_qp_exit1;
- }
-
- qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
- qp_attr->qp_state = qp_attr->cur_qp_state;
-
- if (qp_attr->cur_qp_state == -EINVAL) {
- ret = -EINVAL;
- ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
- "ehca_qp=%p qp_num=%x",
- qpcb->qp_state, my_qp, qp->qp_num);
- goto query_qp_exit1;
- }
-
- if (qp_attr->qp_state == IB_QPS_SQD)
- qp_attr->sq_draining = 1;
-
- qp_attr->qkey = qpcb->qkey;
- qp_attr->path_mtu = qpcb->path_mtu;
- qp_attr->path_mig_state = qpcb->path_migration_state - 1;
- qp_attr->rq_psn = qpcb->receive_psn;
- qp_attr->sq_psn = qpcb->send_psn;
- qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
- qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
- qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
- /* UD_AV CIRCUMVENTION */
- if (my_qp->qp_type == IB_QPT_UD) {
- qp_attr->cap.max_send_sge =
- qpcb->actual_nr_sges_in_sq_wqe - 2;
- qp_attr->cap.max_recv_sge =
- qpcb->actual_nr_sges_in_rq_wqe - 2;
- } else {
- qp_attr->cap.max_send_sge =
- qpcb->actual_nr_sges_in_sq_wqe;
- qp_attr->cap.max_recv_sge =
- qpcb->actual_nr_sges_in_rq_wqe;
- }
-
- qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
- qp_attr->dest_qp_num = qpcb->dest_qp_nr;
-
- qp_attr->pkey_index = qpcb->prim_p_key_idx;
- qp_attr->port_num = qpcb->prim_phys_port;
- qp_attr->timeout = qpcb->timeout;
- qp_attr->retry_cnt = qpcb->retry_count;
- qp_attr->rnr_retry = qpcb->rnr_retry_count;
-
- qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
- qp_attr->alt_port_num = qpcb->alt_phys_port;
- qp_attr->alt_timeout = qpcb->timeout_al;
-
- qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
- qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
-
- /* primary av */
- qp_attr->ah_attr.sl = qpcb->service_level;
-
- if (qpcb->send_grh_flag) {
- qp_attr->ah_attr.ah_flags = IB_AH_GRH;
- }
-
- qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
- qp_attr->ah_attr.dlid = qpcb->dlid;
- qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
- qp_attr->ah_attr.port_num = qp_attr->port_num;
-
- /* primary GRH */
- qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
- qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
- qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
- qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
-
- for (cnt = 0; cnt < 16; cnt++)
- qp_attr->ah_attr.grh.dgid.raw[cnt] =
- qpcb->dest_gid.byte[cnt];
-
- /* alternate AV */
- qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
- if (qpcb->send_grh_flag_al) {
- qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
- }
-
- qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
- qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
- qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
-
- /* alternate GRH */
- qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
- qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
- qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
- qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
-
- for (cnt = 0; cnt < 16; cnt++)
- qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
- qpcb->dest_gid_al.byte[cnt];
-
- /* return init attributes given in ehca_create_qp */
- if (qp_init_attr)
- *qp_init_attr = my_qp->init_attr;
-
- if (ehca_debug_level >= 2)
- ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
-
-query_qp_exit1:
- ehca_free_fw_ctrlblock(qpcb);
-
- return ret;
-}
-
-int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
-{
- struct ehca_qp *my_qp =
- container_of(ibsrq, struct ehca_qp, ib_srq);
- struct ehca_shca *shca =
- container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
- struct hcp_modify_qp_control_block *mqpcb;
- u64 update_mask;
- u64 h_ret;
- int ret = 0;
-
- mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!mqpcb) {
- ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
- "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
- return -ENOMEM;
- }
-
- update_mask = 0;
- if (attr_mask & IB_SRQ_LIMIT) {
- attr_mask &= ~IB_SRQ_LIMIT;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
- | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
- mqpcb->curr_srq_limit = attr->srq_limit;
- mqpcb->qp_aff_asyn_ev_log_reg =
- EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
- }
-
- /* by now, all bits in attr_mask should have been cleared */
- if (attr_mask) {
- ehca_err(ibsrq->device, "invalid attribute mask bits set "
- "attr_mask=%x", attr_mask);
- ret = -EINVAL;
- goto modify_srq_exit0;
- }
-
- if (ehca_debug_level >= 2)
- ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
- h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
- NULL, update_mask, mqpcb,
- my_qp->galpas.kernel);
-
- if (h_ret != H_SUCCESS) {
- ret = ehca2ib_return_code(h_ret);
- ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
- "ehca_qp=%p qp_num=%x",
- h_ret, my_qp, my_qp->real_qp_num);
- }
-
-modify_srq_exit0:
- ehca_free_fw_ctrlblock(mqpcb);
-
- return ret;
-}
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
-{
- struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
- struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
- ib_device);
- struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
- struct hcp_modify_qp_control_block *qpcb;
- int ret = 0;
- u64 h_ret;
-
- qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
- if (!qpcb) {
- ehca_err(srq->device, "Out of memory for qpcb "
- "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
- return -ENOMEM;
- }
-
- h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
- NULL, qpcb, my_qp->galpas.kernel);
-
- if (h_ret != H_SUCCESS) {
- ret = ehca2ib_return_code(h_ret);
- ehca_err(srq->device, "hipz_h_query_qp() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lli",
- my_qp, my_qp->real_qp_num, h_ret);
- goto query_srq_exit1;
- }
-
- srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
- srq_attr->max_sge = 3;
- srq_attr->srq_limit = qpcb->curr_srq_limit;
-
- if (ehca_debug_level >= 2)
- ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-query_srq_exit1:
- ehca_free_fw_ctrlblock(qpcb);
-
- return ret;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
- struct ib_uobject *uobject)
-{
- struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
- struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
- ib_pd);
- struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
- u32 qp_num = my_qp->real_qp_num;
- int ret;
- u64 h_ret;
- u8 port_num;
- int is_user = 0;
- enum ib_qp_type qp_type;
- unsigned long flags;
-
- if (uobject) {
- is_user = 1;
- if (my_qp->mm_count_galpa ||
- my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
- ehca_err(dev, "Resources still referenced in "
- "user space qp_num=%x", qp_num);
- return -EINVAL;
- }
- }
-
- if (my_qp->send_cq) {
- ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
- if (ret) {
- ehca_err(dev, "Couldn't unassign qp from "
- "send_cq ret=%i qp_num=%x cq_num=%x", ret,
- qp_num, my_qp->send_cq->cq_number);
- return ret;
- }
- }
-
- write_lock_irqsave(&ehca_qp_idr_lock, flags);
- idr_remove(&ehca_qp_idr, my_qp->token);
- write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
- /*
- * SRQs will never get into an error list and do not have a recv_cq,
- * so we need to skip them here.
- */
- if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
- del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
-
- if (HAS_SQ(my_qp) && !is_user)
- del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
- /* now wait until all pending events have completed */
- wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
-
- h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
- if (h_ret != H_SUCCESS) {
- ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
- "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
- return ehca2ib_return_code(h_ret);
- }
-
- port_num = my_qp->init_attr.port_num;
- qp_type = my_qp->init_attr.qp_type;
-
- if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
- spin_lock_irqsave(&sport->mod_sqp_lock, flags);
- kfree(my_qp->mod_qp_parm);
- my_qp->mod_qp_parm = NULL;
- shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
- spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
- }
-
- /* no support for IB_QPT_SMI yet */
- if (qp_type == IB_QPT_GSI) {
- struct ib_event event;
- ehca_info(dev, "device %s: port %x is inactive.",
- shca->ib_device.name, port_num);
- event.device = &shca->ib_device;
- event.event = IB_EVENT_PORT_ERR;
- event.element.port_num = port_num;
- shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
- ib_dispatch_event(&event);
- }
-
- if (HAS_RQ(my_qp)) {
- ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
- if (!is_user)
- vfree(my_qp->rq_map.map);
- }
- if (HAS_SQ(my_qp)) {
- ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
- if (!is_user)
- vfree(my_qp->sq_map.map);
- }
- kmem_cache_free(qp_cache, my_qp);
- atomic_dec(&shca->num_qps);
- return 0;
-}
-
-int ehca_destroy_qp(struct ib_qp *qp)
-{
- return internal_destroy_qp(qp->device,
- container_of(qp, struct ehca_qp, ib_qp),
- qp->uobject);
-}
-
-int ehca_destroy_srq(struct ib_srq *srq)
-{
- return internal_destroy_qp(srq->device,
- container_of(srq, struct ehca_qp, ib_srq),
- srq->uobject);
-}
-
-int ehca_init_qp_cache(void)
-{
- qp_cache = kmem_cache_create("ehca_cache_qp",
- sizeof(struct ehca_qp), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!qp_cache)
- return -ENOMEM;
- return 0;
-}
-
-void ehca_cleanup_qp_cache(void)
-{
- kmem_cache_destroy(qp_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
deleted file mode 100644
index 11813b880e16..000000000000
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ /dev/null
@@ -1,953 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * post_send/recv, poll_cq, req_notify
- *
- * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-/* in RC traffic, insert an empty RDMA READ after this many packets */
-#define ACK_CIRC_THRESHOLD 2000000
-
-static u64 replace_wr_id(u64 wr_id, u16 idx)
-{
- u64 ret;
-
- ret = wr_id & ~QMAP_IDX_MASK;
- ret |= idx & QMAP_IDX_MASK;
-
- return ret;
-}
-
-static u16 get_app_wr_id(u64 wr_id)
-{
- return wr_id & QMAP_IDX_MASK;
-}
-
-static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
- struct ehca_wqe *wqe_p,
- struct ib_recv_wr *recv_wr,
- u32 rq_map_idx)
-{
- u8 cnt_ds;
- if (unlikely((recv_wr->num_sge < 0) ||
- (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
- ehca_gen_err("Invalid number of WQE SGE. "
-			     "num_sge=%x max_nr_of_sg=%x",
- recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
- return -EINVAL; /* invalid SG list length */
- }
-
- /* clear wqe header until sglist */
- memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
- wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
- wqe_p->nr_of_data_seg = recv_wr->num_sge;
-
- for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
- wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
- recv_wr->sg_list[cnt_ds].addr;
- wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
- recv_wr->sg_list[cnt_ds].lkey;
- wqe_p->u.all_rcv.sg_list[cnt_ds].length =
- recv_wr->sg_list[cnt_ds].length;
- }
-
- if (ehca_debug_level >= 3) {
- ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
- ipz_rqueue);
- ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
- }
-
- return 0;
-}
-
-#if defined(DEBUG_GSI_SEND_WR)
-
-/* need ib_mad struct */
-#include <rdma/ib_mad.h>
-
-static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
-{
-	int idx = 0;
- int j;
- while (ud_wr) {
-		struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
- struct ib_sge *sge = ud_wr->wr.sg_list;
- ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
- "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
- ud_wr->wr.num_sge, ud_wr->wr.send_flags,
-			     ud_wr->wr.opcode);
- if (mad_hdr) {
- ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
- "mgmt_class=%x class_version=%x method=%x "
- "status=%x class_specific=%x tid=%lx "
- "attr_id=%x resv=%x attr_mod=%x",
- idx, mad_hdr->base_version,
- mad_hdr->mgmt_class,
- mad_hdr->class_version, mad_hdr->method,
- mad_hdr->status, mad_hdr->class_specific,
- mad_hdr->tid, mad_hdr->attr_id,
- mad_hdr->resv,
- mad_hdr->attr_mod);
- }
- for (j = 0; j < ud_wr->wr.num_sge; j++) {
- u8 *data = __va(sge->addr);
- ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
- "lkey=%x",
- idx, j, data, sge->length, sge->lkey);
- /* assume length is n*16 */
- ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
- idx, j);
- sge++;
- } /* eof for j */
- idx++;
- ud_wr = ud_wr(ud_wr->wr.next);
- } /* eof while ud_wr */
-}
-
-#endif /* DEBUG_GSI_SEND_WR */
-
-static inline int ehca_write_swqe(struct ehca_qp *qp,
- struct ehca_wqe *wqe_p,
- struct ib_send_wr *send_wr,
- u32 sq_map_idx,
- int hidden)
-{
- u32 idx;
- u64 dma_length;
- struct ehca_av *my_av;
- u32 remote_qkey;
- struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
-
- if (unlikely((send_wr->num_sge < 0) ||
- (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
- ehca_gen_err("Invalid number of WQE SGE. "
-			     "num_sge=%x max_nr_of_sg=%x",
- send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
- return -EINVAL; /* invalid SG list length */
- }
-
- /* clear wqe header until sglist */
- memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
- wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
-
- qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
- qmap_entry->reported = 0;
- qmap_entry->cqe_req = 0;
-
- switch (send_wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- wqe_p->optype = WQE_OPTYPE_SEND;
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
- break;
- case IB_WR_RDMA_READ:
- wqe_p->optype = WQE_OPTYPE_RDMAREAD;
- break;
- default:
- ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
- return -EINVAL; /* invalid opcode */
- }
-
- wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
-
- wqe_p->wr_flag = 0;
-
- if ((send_wr->send_flags & IB_SEND_SIGNALED ||
- qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
- && !hidden) {
- wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
- qmap_entry->cqe_req = 1;
- }
-
- if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
- send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
- /* this might not work as long as HW does not support it */
- wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
- wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
- }
-
- wqe_p->nr_of_data_seg = send_wr->num_sge;
-
- switch (qp->qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
-		/* no break is intentional here */
- case IB_QPT_UD:
- /* IB 1.2 spec C10-15 compliance */
- remote_qkey = ud_wr(send_wr)->remote_qkey;
- if (remote_qkey & 0x80000000)
- remote_qkey = qp->qkey;
-
- wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
- wqe_p->local_ee_context_qkey = remote_qkey;
- if (unlikely(!ud_wr(send_wr)->ah)) {
-			ehca_gen_err("ud_wr(send_wr)->ah is NULL. qp=%p", qp);
- return -EINVAL;
- }
- if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
- ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
- return -EINVAL;
- }
- my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
- wqe_p->u.ud_av.ud_av = my_av->av;
-
- /*
- * omitted check of IB_SEND_INLINE
- * since HW does not support it
- */
- for (idx = 0; idx < send_wr->num_sge; idx++) {
- wqe_p->u.ud_av.sg_list[idx].vaddr =
- send_wr->sg_list[idx].addr;
- wqe_p->u.ud_av.sg_list[idx].lkey =
- send_wr->sg_list[idx].lkey;
- wqe_p->u.ud_av.sg_list[idx].length =
- send_wr->sg_list[idx].length;
- } /* eof for idx */
- if (qp->qp_type == IB_QPT_SMI ||
- qp->qp_type == IB_QPT_GSI)
- wqe_p->u.ud_av.ud_av.pmtu = 1;
- if (qp->qp_type == IB_QPT_GSI) {
- wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
-#ifdef DEBUG_GSI_SEND_WR
- trace_ud_wr(ud_wr(send_wr));
-#endif /* DEBUG_GSI_SEND_WR */
- }
- break;
-
- case IB_QPT_UC:
- if (send_wr->send_flags & IB_SEND_FENCE)
- wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
- /* no break is intentional here */
- case IB_QPT_RC:
- /* TODO: atomic not implemented */
- wqe_p->u.nud.remote_virtual_address =
- rdma_wr(send_wr)->remote_addr;
- wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
-
- /*
- * omitted checking of IB_SEND_INLINE
- * since HW does not support it
- */
- dma_length = 0;
- for (idx = 0; idx < send_wr->num_sge; idx++) {
- wqe_p->u.nud.sg_list[idx].vaddr =
- send_wr->sg_list[idx].addr;
- wqe_p->u.nud.sg_list[idx].lkey =
- send_wr->sg_list[idx].lkey;
- wqe_p->u.nud.sg_list[idx].length =
- send_wr->sg_list[idx].length;
- dma_length += send_wr->sg_list[idx].length;
- } /* eof idx */
- wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
-
- /* unsolicited ack circumvention */
- if (send_wr->opcode == IB_WR_RDMA_READ) {
- /* on RDMA read, switch on and reset counters */
- qp->message_count = qp->packet_count = 0;
- qp->unsol_ack_circ = 1;
- } else
- /* else estimate #packets */
- qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
-
- break;
-
- default:
- ehca_gen_err("Invalid qptype=%x", qp->qp_type);
- return -EINVAL;
- }
-
- if (ehca_debug_level >= 3) {
- ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
-		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
- }
- return 0;
-}
-
-/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
-static inline void map_ib_wc_status(u32 cqe_status,
- enum ib_wc_status *wc_status)
-{
- if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
- switch (cqe_status & 0x3F) {
- case 0x01:
- case 0x21:
- *wc_status = IB_WC_LOC_LEN_ERR;
- break;
- case 0x02:
- case 0x22:
- *wc_status = IB_WC_LOC_QP_OP_ERR;
- break;
- case 0x03:
- case 0x23:
- *wc_status = IB_WC_LOC_EEC_OP_ERR;
- break;
- case 0x04:
- case 0x24:
- *wc_status = IB_WC_LOC_PROT_ERR;
- break;
- case 0x05:
- case 0x25:
- *wc_status = IB_WC_WR_FLUSH_ERR;
- break;
- case 0x06:
- *wc_status = IB_WC_MW_BIND_ERR;
- break;
- case 0x07: /* remote error - look into bits 20:24 */
- switch ((cqe_status
- & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
- case 0x0:
- /*
- * PSN Sequence Error!
- * couldn't find a matching status!
- */
- *wc_status = IB_WC_GENERAL_ERR;
- break;
- case 0x1:
- *wc_status = IB_WC_REM_INV_REQ_ERR;
- break;
- case 0x2:
- *wc_status = IB_WC_REM_ACCESS_ERR;
- break;
- case 0x3:
- *wc_status = IB_WC_REM_OP_ERR;
- break;
- case 0x4:
- *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
- break;
- }
- break;
- case 0x08:
- *wc_status = IB_WC_RETRY_EXC_ERR;
- break;
- case 0x09:
- *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- case 0x0A:
- case 0x2D:
- *wc_status = IB_WC_REM_ABORT_ERR;
- break;
- case 0x0B:
- case 0x2E:
- *wc_status = IB_WC_INV_EECN_ERR;
- break;
- case 0x0C:
- case 0x2F:
- *wc_status = IB_WC_INV_EEC_STATE_ERR;
- break;
- case 0x0D:
- *wc_status = IB_WC_BAD_RESP_ERR;
- break;
- case 0x10:
- /* WQE purged */
- *wc_status = IB_WC_WR_FLUSH_ERR;
- break;
- default:
- *wc_status = IB_WC_FATAL_ERR;
-
- }
- } else
- *wc_status = IB_WC_SUCCESS;
-}
-
-static inline int post_one_send(struct ehca_qp *my_qp,
- struct ib_send_wr *cur_send_wr,
- int hidden)
-{
- struct ehca_wqe *wqe_p;
- int ret;
- u32 sq_map_idx;
- u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-
- /* get pointer next to free WQE */
- wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
- if (unlikely(!wqe_p)) {
- /* too many posted work requests: queue overflow */
- ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
- "qp_num=%x", my_qp->ib_qp.qp_num);
- return -ENOMEM;
- }
-
- /*
- * Get the index of the WQE in the send queue. The same index is used
- * for writing into the sq_map.
- */
- sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
-
- /* write a SEND WQE into the QUEUE */
- ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
- /*
- * if something failed,
- * reset the free entry pointer to the start value
- */
- if (unlikely(ret)) {
- my_qp->ipz_squeue.current_q_offset = start_offset;
- ehca_err(my_qp->ib_qp.device, "Could not write WQE "
- "qp_num=%x", my_qp->ib_qp.qp_num);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int ehca_post_send(struct ib_qp *qp,
- struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr)
-{
- struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
- int wqe_cnt = 0;
- int ret = 0;
- unsigned long flags;
-
- /* Reject WR if QP is in RESET, INIT or RTR state */
- if (unlikely(my_qp->state < IB_QPS_RTS)) {
- ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
- my_qp->state, qp->qp_num);
- ret = -EINVAL;
- goto out;
- }
-
- /* LOCK the QUEUE */
- spin_lock_irqsave(&my_qp->spinlock_s, flags);
-
- /* Send an empty extra RDMA read if:
- * 1) there has been an RDMA read on this connection before
- * 2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
- * 3) we can be sure that any previous extra RDMA read has been
- * processed so we don't overflow the SQ
- */
- if (unlikely(my_qp->unsol_ack_circ &&
- my_qp->packet_count > ACK_CIRC_THRESHOLD &&
- my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
- /* insert an empty RDMA READ to fix up the remote QP state */
- struct ib_send_wr circ_wr;
- memset(&circ_wr, 0, sizeof(circ_wr));
- circ_wr.opcode = IB_WR_RDMA_READ;
- post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
- wqe_cnt++;
- ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
- my_qp->message_count = my_qp->packet_count = 0;
- }
-
- /* loop processes list of send reqs */
- while (send_wr) {
- ret = post_one_send(my_qp, send_wr, 0);
- if (unlikely(ret)) {
- goto post_send_exit0;
- }
- wqe_cnt++;
- send_wr = send_wr->next;
- }
-
-post_send_exit0:
- iosync(); /* serialize GAL register access */
- hipz_update_sqa(my_qp, wqe_cnt);
- if (unlikely(ret || ehca_debug_level >= 2))
- ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
- my_qp, qp->qp_num, wqe_cnt, ret);
- my_qp->message_count += wqe_cnt;
- spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-
-out:
- if (ret)
- *bad_send_wr = send_wr;
- return ret;
-}
-
-static int internal_post_recv(struct ehca_qp *my_qp,
- struct ib_device *dev,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
-{
- struct ehca_wqe *wqe_p;
- int wqe_cnt = 0;
- int ret = 0;
- u32 rq_map_idx;
- unsigned long flags;
- struct ehca_qmap_entry *qmap_entry;
-
- if (unlikely(!HAS_RQ(my_qp))) {
- ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
- my_qp, my_qp->real_qp_num, my_qp->ext_type);
- ret = -ENODEV;
- goto out;
- }
-
- /* LOCK the QUEUE */
- spin_lock_irqsave(&my_qp->spinlock_r, flags);
-
- /* loop processes list of recv reqs */
- while (recv_wr) {
- u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
- /* get pointer next to free WQE */
- wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
- if (unlikely(!wqe_p)) {
- /* too many posted work requests: queue overflow */
- ret = -ENOMEM;
- ehca_err(dev, "Too many posted WQEs "
- "qp_num=%x", my_qp->real_qp_num);
- goto post_recv_exit0;
- }
- /*
- * Get the index of the WQE in the recv queue. The same index
- * is used for writing into the rq_map.
- */
- rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
-
- /* write a RECV WQE into the QUEUE */
- ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
- rq_map_idx);
- /*
- * if something failed,
- * reset the free entry pointer to the start value
- */
- if (unlikely(ret)) {
- my_qp->ipz_rqueue.current_q_offset = start_offset;
- ret = -EINVAL;
- ehca_err(dev, "Could not write WQE "
- "qp_num=%x", my_qp->real_qp_num);
- goto post_recv_exit0;
- }
-
- qmap_entry = &my_qp->rq_map.map[rq_map_idx];
- qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
- qmap_entry->reported = 0;
- qmap_entry->cqe_req = 1;
-
- wqe_cnt++;
- recv_wr = recv_wr->next;
- } /* eof for recv_wr */
-
-post_recv_exit0:
- iosync(); /* serialize GAL register access */
- hipz_update_rqa(my_qp, wqe_cnt);
- if (unlikely(ret || ehca_debug_level >= 2))
- ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
- my_qp, my_qp->real_qp_num, wqe_cnt, ret);
- spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
-
-out:
- if (ret)
- *bad_recv_wr = recv_wr;
-
- return ret;
-}
-
-int ehca_post_recv(struct ib_qp *qp,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
-{
- struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-
- /* Reject WR if QP is in RESET state */
- if (unlikely(my_qp->state == IB_QPS_RESET)) {
- ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
- my_qp->state, qp->qp_num);
- *bad_recv_wr = recv_wr;
- return -EINVAL;
- }
-
- return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
-}
-
-int ehca_post_srq_recv(struct ib_srq *srq,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
-{
- return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
- srq->device, recv_wr, bad_recv_wr);
-}
-
-/*
- * ib_wc_opcode table converts ehca wc opcodes to ib.
- * Since zero indicates an invalid opcode, each entry holds the actual
- * ib opcode incremented by one, so it must be decremented when read.
- */
-static const u8 ib_wc_opcode[255] = {
- [0x01] = IB_WC_RECV+1,
- [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
- [0x08] = IB_WC_FETCH_ADD+1,
- [0x10] = IB_WC_COMP_SWAP+1,
- [0x20] = IB_WC_RDMA_WRITE+1,
- [0x40] = IB_WC_RDMA_READ+1,
- [0x80] = IB_WC_SEND+1
-};
-
-/* internal function to poll one entry of cq */
-static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
-{
- int ret = 0, qmap_tail_idx;
- struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- struct ehca_cqe *cqe;
- struct ehca_qp *my_qp;
- struct ehca_qmap_entry *qmap_entry;
- struct ehca_queue_map *qmap;
- int cqe_count = 0, is_error;
-
-repoll:
- cqe = (struct ehca_cqe *)
- ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
- if (!cqe) {
- ret = -EAGAIN;
- if (ehca_debug_level >= 3)
- ehca_dbg(cq->device, "Completion queue is empty "
- "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
- goto poll_cq_one_exit0;
- }
-
- /* prevents loads being reordered across this point */
- rmb();
-
- cqe_count++;
- if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
- struct ehca_qp *qp;
- int purgeflag;
- unsigned long flags;
-
- qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
- if (!qp) {
- ehca_err(cq->device, "cq_num=%x qp_num=%x "
- "could not find qp -> ignore cqe",
- my_cq->cq_number, cqe->local_qp_number);
- ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
- my_cq->cq_number, cqe->local_qp_number);
- /* ignore this purged cqe */
- goto repoll;
- }
- spin_lock_irqsave(&qp->spinlock_s, flags);
- purgeflag = qp->sqerr_purgeflag;
- spin_unlock_irqrestore(&qp->spinlock_s, flags);
-
- if (purgeflag) {
- ehca_dbg(cq->device,
- "Got CQE with purged bit qp_num=%x src_qp=%x",
- cqe->local_qp_number, cqe->remote_qp_number);
- if (ehca_debug_level >= 2)
- ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
- cqe->local_qp_number,
- cqe->remote_qp_number);
- /*
-			 * ignore this CQE to avoid duplicate CQEs for the bad
-			 * WQE that caused the SQ error, and turn off the purge flag
- */
- qp->sqerr_purgeflag = 0;
- goto repoll;
- }
- }
-
- is_error = cqe->status & WC_STATUS_ERROR_BIT;
-
- /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
- if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
- ehca_dbg(cq->device,
- "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
- is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
- ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
- my_cq, my_cq->cq_number);
- ehca_dbg(cq->device,
- "ehca_cq=%p cq_num=%x -------------------------",
- my_cq, my_cq->cq_number);
- }
-
- read_lock(&ehca_qp_idr_lock);
- my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
- read_unlock(&ehca_qp_idr_lock);
- if (!my_qp)
- goto repoll;
- wc->qp = &my_qp->ib_qp;
-
- qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
- if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
- /* We got a send completion. */
- qmap = &my_qp->sq_map;
- else
- /* We got a receive completion. */
- qmap = &my_qp->rq_map;
-
- /* advance the tail pointer */
- qmap->tail = qmap_tail_idx;
-
- if (is_error) {
- /*
- * set left_to_poll to 0 because in error state, we will not
- * get any additional CQEs
- */
- my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
- my_qp->sq_map.entries);
- my_qp->sq_map.left_to_poll = 0;
- ehca_add_to_err_list(my_qp, 1);
-
- my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
- my_qp->rq_map.entries);
- my_qp->rq_map.left_to_poll = 0;
- if (HAS_RQ(my_qp))
- ehca_add_to_err_list(my_qp, 0);
- }
-
- qmap_entry = &qmap->map[qmap_tail_idx];
- if (qmap_entry->reported) {
- ehca_warn(cq->device, "Double cqe on qp_num=%#x",
- my_qp->real_qp_num);
- /* found a double cqe, discard it and read next one */
- goto repoll;
- }
-
- wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
- qmap_entry->reported = 1;
-
- /* if left_to_poll is decremented to 0, add the QP to the error list */
- if (qmap->left_to_poll > 0) {
- qmap->left_to_poll--;
- if ((my_qp->sq_map.left_to_poll == 0) &&
- (my_qp->rq_map.left_to_poll == 0)) {
- ehca_add_to_err_list(my_qp, 1);
- if (HAS_RQ(my_qp))
- ehca_add_to_err_list(my_qp, 0);
- }
- }
-
- /* eval ib_wc_opcode */
- wc->opcode = ib_wc_opcode[cqe->optype]-1;
- if (unlikely(wc->opcode == -1)) {
- ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
- "ehca_cq=%p cq_num=%x",
- cqe->optype, cqe->status, my_cq, my_cq->cq_number);
- /* dump cqe for other infos */
- ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
- my_cq, my_cq->cq_number);
-		/* also update the queue adder to throw away this entry */
- goto repoll;
- }
-
- /* eval ib_wc_status */
- if (unlikely(is_error)) {
- /* complete with errors */
- map_ib_wc_status(cqe->status, &wc->status);
- wc->vendor_err = wc->status;
- } else
- wc->status = IB_WC_SUCCESS;
-
- wc->byte_len = cqe->nr_bytes_transferred;
- wc->pkey_index = cqe->pkey_index;
- wc->slid = cqe->rlid;
- wc->dlid_path_bits = cqe->dlid;
- wc->src_qp = cqe->remote_qp_number;
- /*
- * HW has "Immed data present" and "GRH present" in bits 6 and 5.
- * SW defines those in bits 1 and 0, so we can just shift and mask.
- */
- wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
- wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
- wc->sl = cqe->service_level;
-
-poll_cq_one_exit0:
- if (cqe_count > 0)
- hipz_update_feca(my_cq, cqe_count);
-
- return ret;
-}
-
-static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
- struct ib_wc *wc, int num_entries,
- struct ipz_queue *ipz_queue, int on_sq)
-{
- int nr = 0;
- struct ehca_wqe *wqe;
- u64 offset;
- struct ehca_queue_map *qmap;
- struct ehca_qmap_entry *qmap_entry;
-
- if (on_sq)
- qmap = &my_qp->sq_map;
- else
- qmap = &my_qp->rq_map;
-
- qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
- while ((nr < num_entries) && (qmap_entry->reported == 0)) {
- /* generate flush CQE */
-
- memset(wc, 0, sizeof(*wc));
-
- offset = qmap->next_wqe_idx * ipz_queue->qe_size;
- wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
- if (!wqe) {
- ehca_err(cq->device, "Invalid wqe offset=%#llx on "
- "qp_num=%#x", offset, my_qp->real_qp_num);
- return nr;
- }
-
- wc->wr_id = replace_wr_id(wqe->work_request_id,
- qmap_entry->app_wr_id);
-
- if (on_sq) {
- switch (wqe->optype) {
- case WQE_OPTYPE_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- case WQE_OPTYPE_RDMAWRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case WQE_OPTYPE_RDMAREAD:
- wc->opcode = IB_WC_RDMA_READ;
- break;
- default:
- ehca_err(cq->device, "Invalid optype=%x",
- wqe->optype);
- return nr;
- }
- } else
- wc->opcode = IB_WC_RECV;
-
- if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
- wc->ex.imm_data = wqe->immediate_data;
- wc->wc_flags |= IB_WC_WITH_IMM;
- }
-
- wc->status = IB_WC_WR_FLUSH_ERR;
-
- wc->qp = &my_qp->ib_qp;
-
- /* mark as reported and advance next_wqe pointer */
- qmap_entry->reported = 1;
- qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
- qmap->entries);
- qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
- wc++; nr++;
- }
-
- return nr;
-
-}
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
-{
- struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- int nr;
- struct ehca_qp *err_qp;
- struct ib_wc *current_wc = wc;
- int ret = 0;
- unsigned long flags;
- int entries_left = num_entries;
-
- if (num_entries < 1) {
- ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
- "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
- ret = -EINVAL;
- goto poll_cq_exit0;
- }
-
- spin_lock_irqsave(&my_cq->spinlock, flags);
-
- /* generate flush cqes for send queues */
- list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
- nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
- &err_qp->ipz_squeue, 1);
- entries_left -= nr;
- current_wc += nr;
-
- if (entries_left == 0)
- break;
- }
-
- /* generate flush cqes for receive queues */
- list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
- nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
- &err_qp->ipz_rqueue, 0);
- entries_left -= nr;
- current_wc += nr;
-
- if (entries_left == 0)
- break;
- }
-
- for (nr = 0; nr < entries_left; nr++) {
- ret = ehca_poll_cq_one(cq, current_wc);
- if (ret)
- break;
- current_wc++;
- } /* eof for nr */
- entries_left -= nr;
-
- spin_unlock_irqrestore(&my_cq->spinlock, flags);
- if (ret == -EAGAIN || !ret)
- ret = num_entries - entries_left;
-
-poll_cq_exit0:
- return ret;
-}
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
-{
- struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- int ret = 0;
-
- switch (notify_flags & IB_CQ_SOLICITED_MASK) {
- case IB_CQ_SOLICITED:
- hipz_set_cqx_n0(my_cq, 1);
- break;
- case IB_CQ_NEXT_COMP:
- hipz_set_cqx_n1(my_cq, 1);
- break;
- default:
- return -EINVAL;
- }
-
- if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
- unsigned long spl_flags;
- spin_lock_irqsave(&my_cq->spinlock, spl_flags);
- ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
- spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
- }
-
- return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
deleted file mode 100644
index 376b031c2c7f..000000000000
--- a/drivers/staging/rdma/ehca/ehca_sqp.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * SQP functions
- *
- * Authors: Khadija Souissi <souissi@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rdma/ib_mad.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
-
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-
-/**
- * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When the special
- * queue pair is created successfully, the corresponding port becomes active.
- *
- * Defining special queue pair 0 (SMI QP) is not supported yet.
- *
- * @qp_init_attr: Queue pair init attributes with port and queue pair type
- */
-
-u64 ehca_define_sqp(struct ehca_shca *shca,
- struct ehca_qp *ehca_qp,
- struct ib_qp_init_attr *qp_init_attr)
-{
- u32 pma_qp_nr, bma_qp_nr;
- u64 ret;
- u8 port = qp_init_attr->port_num;
- int counter;
-
- shca->sport[port - 1].port_state = IB_PORT_DOWN;
-
- switch (qp_init_attr->qp_type) {
- case IB_QPT_SMI:
- /* function not supported yet */
- break;
- case IB_QPT_GSI:
- ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
- ehca_qp->ipz_qp_handle,
- ehca_qp->galpas.kernel,
- (u32) qp_init_attr->port_num,
- &pma_qp_nr, &bma_qp_nr);
-
- if (ret != H_SUCCESS) {
- ehca_err(&shca->ib_device,
- "Can't define AQP1 for port %x. h_ret=%lli",
- port, ret);
- return ret;
- }
- shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
- ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
- port, pma_qp_nr);
- break;
- default:
- ehca_err(&shca->ib_device, "invalid qp_type=%x",
- qp_init_attr->qp_type);
- return H_PARAMETER;
- }
-
- if (ehca_nr_ports < 0) /* autodetect mode */
- return H_SUCCESS;
-
- for (counter = 0;
- shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
- counter < ehca_port_act_time;
- counter++) {
- ehca_dbg(&shca->ib_device, "... wait until port %x is active",
- port);
- msleep_interruptible(1000);
- }
-
- if (counter == ehca_port_act_time) {
- ehca_err(&shca->ib_device, "Port %x is not active.", port);
- return H_HARDWARE;
- }
-
- return H_SUCCESS;
-}
-
-struct ib_perf {
- struct ib_mad_hdr mad_hdr;
- u8 reserved[40];
- u8 data[192];
-} __attribute__ ((packed));
-
-/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
-struct tcslfl {
- u32 tc:8;
- u32 sl:4;
- u32 fl:20;
-} __attribute__ ((packed));
-
-/* IP Version/TC/FL packed into 32 bits, as in GRH */
-struct vertcfl {
- u32 ver:4;
- u32 tc:8;
- u32 fl:20;
-} __attribute__ ((packed));
-
-static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
- const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
- struct ib_perf *out_perf = (struct ib_perf *)out_mad;
- struct ib_class_port_info *poi =
- (struct ib_class_port_info *)out_perf->data;
- struct tcslfl *tcslfl =
- (struct tcslfl *)&poi->redirect_tcslfl;
- struct ehca_shca *shca =
- container_of(ibdev, struct ehca_shca, ib_device);
- struct ehca_sport *sport = &shca->sport[port_num - 1];
-
- ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
-
- *out_mad = *in_mad;
-
- if (in_perf->mad_hdr.class_version != 1) {
- ehca_warn(ibdev, "Unsupported class_version=%x",
- in_perf->mad_hdr.class_version);
- out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
- goto perf_reply;
- }
-
- switch (in_perf->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- case IB_MGMT_METHOD_SET:
- /* set class port info for redirection */
- out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
- out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
- memset(poi, 0, sizeof(*poi));
- poi->base_version = 1;
- poi->class_version = 1;
- poi->resp_time_value = 18;
-
- /* copy local routing information from WC where applicable */
- tcslfl->sl = in_wc->sl;
- poi->redirect_lid =
- sport->saved_attr.lid | in_wc->dlid_path_bits;
- poi->redirect_qp = sport->pma_qp_nr;
- poi->redirect_qkey = IB_QP1_QKEY;
-
- ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
- &poi->redirect_pkey);
-
- /* if request was globally routed, copy route info */
- if (in_grh) {
- const struct vertcfl *vertcfl =
- (const struct vertcfl *)&in_grh->version_tclass_flow;
- memcpy(poi->redirect_gid, in_grh->dgid.raw,
- sizeof(poi->redirect_gid));
- tcslfl->tc = vertcfl->tc;
- tcslfl->fl = vertcfl->fl;
- } else
- /* else only fill in default GID */
- ehca_query_gid(ibdev, port_num, 0,
- (union ib_gid *)&poi->redirect_gid);
-
- ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
- sport->saved_attr.lid, sport->pma_qp_nr);
- break;
-
- case IB_MGMT_METHOD_GET_RESP:
- return IB_MAD_RESULT_FAILURE;
-
- default:
- out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
- break;
- }
-
-perf_reply:
- out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
-
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- int ret;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
- if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
- return IB_MAD_RESULT_FAILURE;
-
- /* accept only pma request */
- if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
- return IB_MAD_RESULT_SUCCESS;
-
- ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
- ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
- in_mad, out_mad);
-
- return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
deleted file mode 100644
index d280b12aae64..000000000000
--- a/drivers/staging/rdma/ehca/ehca_tools.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * auxiliary functions
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Khadija Souissi <souissik@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef EHCA_TOOLS_H
-#define EHCA_TOOLS_H
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/device.h>
-
-#include <linux/atomic.h>
-#include <asm/ibmebus.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/hvcall.h>
-
-extern int ehca_debug_level;
-
-#define ehca_dbg(ib_dev, format, arg...) \
- do { \
- if (unlikely(ehca_debug_level)) \
- dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
- "PU%04x EHCA_DBG:%s " format "\n", \
- raw_smp_processor_id(), __func__, \
- ## arg); \
- } while (0)
-
-#define ehca_info(ib_dev, format, arg...) \
- dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
- raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_warn(ib_dev, format, arg...) \
- dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
- raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_err(ib_dev, format, arg...) \
- dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
- raw_smp_processor_id(), __func__, ## arg)
-
-/* use this one only if no ib_dev available */
-#define ehca_gen_dbg(format, arg...) \
- do { \
- if (unlikely(ehca_debug_level)) \
- printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
- raw_smp_processor_id(), __func__, ## arg); \
- } while (0)
-
-#define ehca_gen_warn(format, arg...) \
- printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
- raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_gen_err(format, arg...) \
- printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
- raw_smp_processor_id(), __func__, ## arg)
-
-/**
- * ehca_dmp - printk a memory block, whose length is n*8 bytes.
- * Each line has the following layout:
- * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
- */
-#define ehca_dmp(adr, len, format, args...) \
- do { \
- unsigned int x; \
- unsigned int l = (unsigned int)(len); \
- unsigned char *deb = (unsigned char *)(adr); \
- for (x = 0; x < l; x += 16) { \
- printk(KERN_INFO "EHCA_DMP:%s " format \
- " adr=%p ofs=%04x %016llx %016llx\n", \
- __func__, ##args, deb, x, \
- *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
- deb += 16; \
- } \
- } while (0)
-
-/* define a bitmask, little endian version */
-#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
-
-/* define a bitmask, the ibm way... */
-#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
-
-/* internal function, don't use */
-#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
-
-/* internal function, don't use */
-#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
-
-/**
- * EHCA_BMASK_SET - return value shifted and masked by mask
- * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
- * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
- * in variable
- */
-#define EHCA_BMASK_SET(mask, value) \
- ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
-
-/**
- * EHCA_BMASK_GET - extract a parameter from value by mask
- */
-#define EHCA_BMASK_GET(mask, value) \
- (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
-
-/* Converts ehca to ib return code */
-int ehca2ib_return_code(u64 ehca_rc);
-
-#endif /* EHCA_TOOLS_H */
diff --git a/drivers/staging/rdma/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
deleted file mode 100644
index 1a1d5d99fcf9..000000000000
--- a/drivers/staging/rdma/ehca/ehca_uverbs.c
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * userspace support verbs
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
- struct ib_udata *udata)
-{
- struct ehca_ucontext *my_context;
-
- my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
- if (!my_context) {
- ehca_err(device, "Out of memory device=%p", device);
- return ERR_PTR(-ENOMEM);
- }
-
- return &my_context->ib_ucontext;
-}
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context)
-{
- kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
- return 0;
-}
-
-static void ehca_mm_open(struct vm_area_struct *vma)
-{
- u32 *count = (u32 *)vma->vm_private_data;
- if (!count) {
- ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
- vma->vm_start, vma->vm_end);
- return;
- }
- (*count)++;
- if (!(*count))
- ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
- vma->vm_start, vma->vm_end);
- ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
- vma->vm_start, vma->vm_end, *count);
-}
-
-static void ehca_mm_close(struct vm_area_struct *vma)
-{
- u32 *count = (u32 *)vma->vm_private_data;
- if (!count) {
- ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
- vma->vm_start, vma->vm_end);
- return;
- }
- (*count)--;
- ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
- vma->vm_start, vma->vm_end, *count);
-}
-
-static const struct vm_operations_struct vm_ops = {
- .open = ehca_mm_open,
- .close = ehca_mm_close,
-};
-
-static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
- u32 *mm_count)
-{
- int ret;
- u64 vsize, physical;
-
- vsize = vma->vm_end - vma->vm_start;
- if (vsize < EHCA_PAGESIZE) {
- ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
- return -EINVAL;
- }
-
- physical = galpas->user.fw_handle;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
- /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
- ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
- vma->vm_page_prot);
- if (unlikely(ret)) {
-		ehca_gen_err("remap_4k_pfn() failed ret=%i", ret);
- return -ENOMEM;
- }
-
- vma->vm_private_data = mm_count;
- (*mm_count)++;
- vma->vm_ops = &vm_ops;
-
- return 0;
-}
-
-static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
- u32 *mm_count)
-{
- int ret;
- u64 start, ofs;
- struct page *page;
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- start = vma->vm_start;
- for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
- u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
- page = virt_to_page(virt_addr);
- ret = vm_insert_page(vma, start, page);
- if (unlikely(ret)) {
- ehca_gen_err("vm_insert_page() failed rc=%i", ret);
- return ret;
- }
- start += PAGE_SIZE;
- }
- vma->vm_private_data = mm_count;
- (*mm_count)++;
- vma->vm_ops = &vm_ops;
-
- return 0;
-}
-
-static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
- u32 rsrc_type)
-{
- int ret;
-
- switch (rsrc_type) {
- case 0: /* galpa fw handle */
- ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
- ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
- if (unlikely(ret)) {
- ehca_err(cq->ib_cq.device,
- "ehca_mmap_fw() failed rc=%i cq_num=%x",
- ret, cq->cq_number);
- return ret;
- }
- break;
-
- case 1: /* cq queue_addr */
- ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
- ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
- if (unlikely(ret)) {
- ehca_err(cq->ib_cq.device,
- "ehca_mmap_queue() failed rc=%i cq_num=%x",
- ret, cq->cq_number);
- return ret;
- }
- break;
-
- default:
- ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
- rsrc_type, cq->cq_number);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
- u32 rsrc_type)
-{
- int ret;
-
- switch (rsrc_type) {
- case 0: /* galpa fw handle */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
- ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
- if (unlikely(ret)) {
- ehca_err(qp->ib_qp.device,
-				 "ehca_mmap_fw() failed ret=%i qp_num=%x",
- ret, qp->ib_qp.qp_num);
- return -ENOMEM;
- }
- break;
-
- case 1: /* qp rqueue_addr */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
- ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
- &qp->mm_count_rqueue);
- if (unlikely(ret)) {
- ehca_err(qp->ib_qp.device,
- "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
- ret, qp->ib_qp.qp_num);
- return ret;
- }
- break;
-
- case 2: /* qp squeue_addr */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
- ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
- &qp->mm_count_squeue);
- if (unlikely(ret)) {
- ehca_err(qp->ib_qp.device,
- "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
- ret, qp->ib_qp.qp_num);
- return ret;
- }
- break;
-
- default:
-		ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
- rsrc_type, qp->ib_qp.qp_num);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- u64 fileoffset = vma->vm_pgoff;
- u32 idr_handle = fileoffset & 0x1FFFFFF;
- u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
- u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
- u32 ret;
- struct ehca_cq *cq;
- struct ehca_qp *qp;
- struct ib_uobject *uobject;
-
- switch (q_type) {
- case 0: /* CQ */
- read_lock(&ehca_cq_idr_lock);
- cq = idr_find(&ehca_cq_idr, idr_handle);
- read_unlock(&ehca_cq_idr_lock);
-
- /* make sure this mmap really belongs to the authorized user */
- if (!cq)
- return -EINVAL;
-
- if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
- return -EINVAL;
-
- ret = ehca_mmap_cq(vma, cq, rsrc_type);
- if (unlikely(ret)) {
- ehca_err(cq->ib_cq.device,
- "ehca_mmap_cq() failed rc=%i cq_num=%x",
- ret, cq->cq_number);
- return ret;
- }
- break;
-
- case 1: /* QP */
- read_lock(&ehca_qp_idr_lock);
- qp = idr_find(&ehca_qp_idr, idr_handle);
- read_unlock(&ehca_qp_idr_lock);
-
- /* make sure this mmap really belongs to the authorized user */
- if (!qp)
- return -EINVAL;
-
- uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
- if (!uobject || uobject->context != context)
- return -EINVAL;
-
- ret = ehca_mmap_qp(vma, qp, rsrc_type);
- if (unlikely(ret)) {
- ehca_err(qp->ib_qp.device,
- "ehca_mmap_qp() failed rc=%i qp_num=%x",
- ret, qp->ib_qp.qp_num);
- return ret;
- }
- break;
-
- default:
- ehca_gen_err("bad queue type %x", q_type);
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/staging/rdma/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
deleted file mode 100644
index 89517ffb4389..000000000000
--- a/drivers/staging/rdma/ehca/hcp_if.c
+++ /dev/null
@@ -1,949 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Firmware Infiniband Interface code for POWER
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Joachim Fenkes <fenkes@de.ibm.com>
- * Gerd Bayer <gerd.bayer@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm/hvcall.h>
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hcp_phyp.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
-#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
-#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
-#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
-#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
-#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
-#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
-
-#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
-#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
-
-#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
-#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
-
-#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
-
-#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
-
-#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
-#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
-#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
-
-#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
-#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
-#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
-
-static DEFINE_SPINLOCK(hcall_lock);
-
-static long ehca_plpar_hcall_norets(unsigned long opcode,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4,
- unsigned long arg5,
- unsigned long arg6,
- unsigned long arg7)
-{
- long ret;
- int i, sleep_msecs;
- unsigned long flags = 0;
-
- if (unlikely(ehca_debug_level >= 2))
- ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
- opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
-
- for (i = 0; i < 5; i++) {
- /* serialize hCalls to work around firmware issue */
- if (ehca_lock_hcalls)
- spin_lock_irqsave(&hcall_lock, flags);
-
- ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
- arg5, arg6, arg7);
-
- if (ehca_lock_hcalls)
- spin_unlock_irqrestore(&hcall_lock, flags);
-
- if (H_IS_LONG_BUSY(ret)) {
- sleep_msecs = get_longbusy_msecs(ret);
- msleep_interruptible(sleep_msecs);
- continue;
- }
-
- if (ret < H_SUCCESS)
- ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
- opcode, ret, arg1, arg2, arg3,
- arg4, arg5, arg6, arg7);
- else
- if (unlikely(ehca_debug_level >= 2))
- ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
-
- return ret;
- }
-
- return H_BUSY;
-}
-
-static long ehca_plpar_hcall9(unsigned long opcode,
- unsigned long *outs, /* array of 9 outputs */
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4,
- unsigned long arg5,
- unsigned long arg6,
- unsigned long arg7,
- unsigned long arg8,
- unsigned long arg9)
-{
- long ret;
- int i, sleep_msecs;
- unsigned long flags = 0;
-
- if (unlikely(ehca_debug_level >= 2))
- ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
- arg1, arg2, arg3, arg4, arg5,
- arg6, arg7, arg8, arg9);
-
- for (i = 0; i < 5; i++) {
- /* serialize hCalls to work around firmware issue */
- if (ehca_lock_hcalls)
- spin_lock_irqsave(&hcall_lock, flags);
-
- ret = plpar_hcall9(opcode, outs,
- arg1, arg2, arg3, arg4, arg5,
- arg6, arg7, arg8, arg9);
-
- if (ehca_lock_hcalls)
- spin_unlock_irqrestore(&hcall_lock, flags);
-
- if (H_IS_LONG_BUSY(ret)) {
- sleep_msecs = get_longbusy_msecs(ret);
- msleep_interruptible(sleep_msecs);
- continue;
- }
-
- if (ret < H_SUCCESS) {
- ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
- opcode, arg1, arg2, arg3, arg4, arg5,
- arg6, arg7, arg8, arg9);
- ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
- ret, outs[0], outs[1], outs[2], outs[3],
- outs[4], outs[5], outs[6], outs[7],
- outs[8]);
- } else if (unlikely(ehca_debug_level >= 2))
- ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
- ret, outs[0], outs[1], outs[2], outs[3],
- outs[4], outs[5], outs[6], outs[7],
- outs[8]);
- return ret;
- }
-
- return H_BUSY;
-}
-
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_pfeq *pfeq,
- const u32 neq_control,
- const u32 number_of_entries,
- struct ipz_eq_handle *eq_handle,
- u32 *act_nr_of_entries,
- u32 *act_pages,
- u32 *eq_ist)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
- u64 allocate_controls;
-
- /* resource type */
- allocate_controls = 3ULL;
-
- /* ISN is associated */
- if (neq_control != 1)
- allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
- else /* notification event queue */
- allocate_controls = (1ULL << 63) | allocate_controls;
-
- ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
- adapter_handle.handle, /* r4 */
- allocate_controls, /* r5 */
- number_of_entries, /* r6 */
- 0, 0, 0, 0, 0, 0);
- eq_handle->handle = outs[0];
- *act_nr_of_entries = (u32)outs[3];
- *act_pages = (u32)outs[4];
- *eq_ist = (u32)outs[5];
-
- if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
- struct ipz_eq_handle eq_handle,
- const u64 event_mask)
-{
- return ehca_plpar_hcall_norets(H_RESET_EVENTS,
- adapter_handle.handle, /* r4 */
- eq_handle.handle, /* r5 */
- event_mask, /* r6 */
- 0, 0, 0, 0);
-}
-
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_cq *cq,
- struct ehca_alloc_cq_parms *param)
-{
- int rc;
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
- adapter_handle.handle, /* r4 */
- 2, /* r5 */
- param->eq_handle.handle, /* r6 */
- cq->token, /* r7 */
- param->nr_cqe, /* r8 */
- 0, 0, 0, 0);
- cq->ipz_cq_handle.handle = outs[0];
- param->act_nr_of_entries = (u32)outs[3];
- param->act_pages = (u32)outs[4];
-
- if (ret == H_SUCCESS) {
- rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
- if (rc) {
- ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
- rc, outs[5]);
-
- ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- cq->ipz_cq_handle.handle, /* r5 */
- 0, 0, 0, 0, 0);
- ret = H_NO_MEM;
- }
- }
-
- if (ret == H_NOT_ENOUGH_RESOURCES)
- ehca_gen_err("Not enough resources. ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_alloc_qp_parms *parms, int is_user)
-{
- int rc;
- u64 ret;
- u64 allocate_controls, max_r10_reg, r11, r12;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- allocate_controls =
- EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
- | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
- | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
- | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
- | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
- | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
- parms->squeue.page_size)
- | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
- parms->rqueue.page_size)
- | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
- !!(parms->ll_comp_flags & LLQP_RECV_COMP))
- | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
- !!(parms->ll_comp_flags & LLQP_SEND_COMP))
- | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
- parms->ud_av_l_key_ctl)
- | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
-
- max_r10_reg =
- EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
- parms->squeue.max_wr + 1)
- | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
- parms->rqueue.max_wr + 1)
- | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
- parms->squeue.max_sge)
- | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
- parms->rqueue.max_sge);
-
- r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
-
- if (parms->ext_type == EQPT_SRQ)
- r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
- else
- r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
-
- ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
- adapter_handle.handle, /* r4 */
- allocate_controls, /* r5 */
- parms->send_cq_handle.handle,
- parms->recv_cq_handle.handle,
- parms->eq_handle.handle,
- ((u64)parms->token << 32) | parms->pd.value,
- max_r10_reg, r11, r12);
-
- parms->qp_handle.handle = outs[0];
- parms->real_qp_num = (u32)outs[1];
- parms->squeue.act_nr_wqes =
- (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
- parms->rqueue.act_nr_wqes =
- (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
- parms->squeue.act_nr_sges =
- (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
- parms->rqueue.act_nr_sges =
- (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
- parms->squeue.queue_size =
- (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
- parms->rqueue.queue_size =
- (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
-
- if (ret == H_SUCCESS) {
- rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
- if (rc) {
- ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
- rc, outs[6]);
-
- ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- parms->qp_handle.handle, /* r5 */
- 0, 0, 0, 0, 0);
- ret = H_NO_MEM;
- }
- }
-
- if (ret == H_NOT_ENOUGH_RESOURCES)
- ehca_gen_err("Not enough resources. ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
- const u8 port_id,
- struct hipz_query_port *query_port_response_block)
-{
- u64 ret;
- u64 r_cb = __pa(query_port_response_block);
-
- if (r_cb & (EHCA_PAGESIZE-1)) {
- ehca_gen_err("response block not page aligned");
- return H_PARAMETER;
- }
-
- ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
- adapter_handle.handle, /* r4 */
- port_id, /* r5 */
- r_cb, /* r6 */
- 0, 0, 0, 0);
-
- if (ehca_debug_level >= 2)
- ehca_dmp(query_port_response_block, 64, "response_block");
-
- return ret;
-}
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
- const u8 port_id, const u32 port_cap,
- const u8 init_type, const int modify_mask)
-{
- u64 port_attributes = port_cap;
-
- if (modify_mask & IB_PORT_SHUTDOWN)
- port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
- if (modify_mask & IB_PORT_INIT_TYPE)
- port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
- if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
- port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
-
- return ehca_plpar_hcall_norets(H_MODIFY_PORT,
- adapter_handle.handle, /* r4 */
- port_id, /* r5 */
- port_attributes, /* r6 */
- 0, 0, 0, 0);
-}
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
- struct hipz_query_hca *query_hca_rblock)
-{
- u64 r_cb = __pa(query_hca_rblock);
-
- if (r_cb & (EHCA_PAGESIZE-1)) {
- ehca_gen_err("response_block=%p not page aligned",
- query_hca_rblock);
- return H_PARAMETER;
- }
-
- return ehca_plpar_hcall_norets(H_QUERY_HCA,
- adapter_handle.handle, /* r4 */
- r_cb, /* r5 */
- 0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
- const u8 pagesize,
- const u8 queue_type,
- const u64 resource_handle,
- const u64 logical_address_of_page,
- u64 count)
-{
- return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
- adapter_handle.handle, /* r4 */
- (u64)queue_type | ((u64)pagesize) << 8,
- /* r5 */
- resource_handle, /* r6 */
- logical_address_of_page, /* r7 */
- count, /* r8 */
- 0, 0);
-}
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_eq_handle eq_handle,
- struct ehca_pfeq *pfeq,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count)
-{
- if (count != 1) {
-		ehca_gen_err("Page counter=%llx", count);
- return H_PARAMETER;
- }
- return hipz_h_register_rpage(adapter_handle,
- pagesize,
- queue_type,
- eq_handle.handle,
- logical_address_of_page, count);
-}
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
- u32 ist)
-{
- u64 ret;
- ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
- adapter_handle.handle, /* r4 */
- ist, /* r5 */
- 0, 0, 0, 0, 0);
-
- if (ret != H_SUCCESS && ret != H_BUSY)
- ehca_gen_err("Could not query interrupt state.");
-
- return ret;
-}
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_cq_handle cq_handle,
- struct ehca_pfcq *pfcq,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count,
- const struct h_galpa gal)
-{
- if (count != 1) {
- ehca_gen_err("Page counter=%llx", count);
- return H_PARAMETER;
- }
-
- return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
- cq_handle.handle, logical_address_of_page,
- count);
-}
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count,
- const struct h_galpa galpa)
-{
- if (count > 1) {
- ehca_gen_err("Page counter=%llx", count);
- return H_PARAMETER;
- }
-
- return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
- qp_handle.handle, logical_address_of_page,
- count);
-}
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- void **log_addr_next_sq_wqe2processed,
- void **log_addr_next_rq_wqe2processed,
- int dis_and_get_function_code)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
- adapter_handle.handle, /* r4 */
- dis_and_get_function_code, /* r5 */
- qp_handle.handle, /* r6 */
- 0, 0, 0, 0, 0, 0);
- if (log_addr_next_sq_wqe2processed)
- *log_addr_next_sq_wqe2processed = (void *)outs[0];
- if (log_addr_next_rq_wqe2processed)
- *log_addr_next_rq_wqe2processed = (void *)outs[1];
-
- return ret;
-}
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- const u64 update_mask,
- struct hcp_modify_qp_control_block *mqpcb,
- struct h_galpa gal)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
- ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- update_mask, /* r6 */
- __pa(mqpcb), /* r7 */
- 0, 0, 0, 0, 0);
-
- if (ret == H_NOT_ENOUGH_RESOURCES)
- ehca_gen_err("Insufficient resources ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- struct hcp_modify_qp_control_block *qqpcb,
- struct h_galpa gal)
-{
- return ehca_plpar_hcall_norets(H_QUERY_QP,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- __pa(qqpcb), /* r6 */
- 0, 0, 0, 0);
-}
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_qp *qp)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = hcp_galpas_dtor(&qp->galpas);
- if (ret) {
- ehca_gen_err("Could not destruct qp->galpas");
- return H_RESOURCE;
- }
- ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
- adapter_handle.handle, /* r4 */
- /* function code */
- 1, /* r5 */
- qp->ipz_qp_handle.handle, /* r6 */
- 0, 0, 0, 0, 0, 0);
- if (ret == H_HARDWARE)
- ehca_gen_err("HCA not operational. ret=%lli", ret);
-
- ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- qp->ipz_qp_handle.handle, /* r5 */
- 0, 0, 0, 0, 0);
-
- if (ret == H_RESOURCE)
- ehca_gen_err("Resource still in use. ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u32 port)
-{
- return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- port, /* r6 */
- 0, 0, 0, 0);
-}
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u32 port, u32 * pma_qp_nr,
- u32 * bma_qp_nr)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- port, /* r6 */
- 0, 0, 0, 0, 0, 0);
- *pma_qp_nr = (u32)outs[0];
- *bma_qp_nr = (u32)outs[1];
-
- if (ret == H_ALIAS_EXIST)
- ehca_gen_err("AQP1 already exists. ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u16 mcg_dlid,
- u64 subnet_prefix, u64 interface_id)
-{
- u64 ret;
-
- ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- mcg_dlid, /* r6 */
- interface_id, /* r7 */
- subnet_prefix, /* r8 */
- 0, 0);
-
- if (ret == H_NOT_ENOUGH_RESOURCES)
- ehca_gen_err("Not enough resources. ret=%lli", ret);
-
- return ret;
-}
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u16 mcg_dlid,
- u64 subnet_prefix, u64 interface_id)
-{
- return ehca_plpar_hcall_norets(H_DETACH_MCQP,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- mcg_dlid, /* r6 */
- interface_id, /* r7 */
- subnet_prefix, /* r8 */
- 0, 0);
-}
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_cq *cq,
- u8 force_flag)
-{
- u64 ret;
-
- ret = hcp_galpas_dtor(&cq->galpas);
- if (ret) {
-		ehca_gen_err("Could not destruct cq->galpas");
- return H_RESOURCE;
- }
-
- ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- cq->ipz_cq_handle.handle, /* r5 */
- force_flag != 0 ? 1L : 0L, /* r6 */
- 0, 0, 0, 0);
-
- if (ret == H_RESOURCE)
- ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
-
- return ret;
-}
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_eq *eq)
-{
- u64 ret;
-
- ret = hcp_galpas_dtor(&eq->galpas);
- if (ret) {
- ehca_gen_err("Could not destruct eq->galpas");
- return H_RESOURCE;
- }
-
- ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- eq->ipz_eq_handle.handle, /* r5 */
- 0, 0, 0, 0, 0);
-
- if (ret == H_RESOURCE)
- ehca_gen_err("Resource in use. ret=%lli ", ret);
-
- return ret;
-}
-
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const u64 vaddr,
- const u64 length,
- const u32 access_ctrl,
- const struct ipz_pd pd,
- struct ehca_mr_hipzout_parms *outparms)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
- adapter_handle.handle, /* r4 */
- 5, /* r5 */
- vaddr, /* r6 */
- length, /* r7 */
- (((u64)access_ctrl) << 32ULL), /* r8 */
- pd.value, /* r9 */
- 0, 0, 0);
- outparms->handle.handle = outs[0];
- outparms->lkey = (u32)outs[2];
- outparms->rkey = (u32)outs[3];
-
- return ret;
-}
-
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count)
-{
- u64 ret;
-
- if (unlikely(ehca_debug_level >= 3)) {
- if (count > 1) {
- u64 *kpage;
- int i;
- kpage = __va(logical_address_of_page);
- for (i = 0; i < count; i++)
- ehca_gen_dbg("kpage[%d]=%p",
- i, (void *)kpage[i]);
- } else
- ehca_gen_dbg("kpage=%p",
- (void *)logical_address_of_page);
- }
-
- if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
- ehca_gen_err("logical_address_of_page not on a 4k boundary "
- "adapter_handle=%llx mr=%p mr_handle=%llx "
- "pagesize=%x queue_type=%x "
- "logical_address_of_page=%llx count=%llx",
- adapter_handle.handle, mr,
- mr->ipz_mr_handle.handle, pagesize, queue_type,
- logical_address_of_page, count);
- ret = H_PARAMETER;
- } else
- ret = hipz_h_register_rpage(adapter_handle, pagesize,
- queue_type,
- mr->ipz_mr_handle.handle,
- logical_address_of_page, count);
- return ret;
-}
-
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- struct ehca_mr_hipzout_parms *outparms)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
- adapter_handle.handle, /* r4 */
- mr->ipz_mr_handle.handle, /* r5 */
- 0, 0, 0, 0, 0, 0, 0);
- outparms->len = outs[0];
- outparms->vaddr = outs[1];
- outparms->acl = outs[4] >> 32;
- outparms->lkey = (u32)(outs[5] >> 32);
- outparms->rkey = (u32)(outs[5] & (0xffffffff));
-
- return ret;
-}
-
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr)
-{
- return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- mr->ipz_mr_handle.handle, /* r5 */
- 0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const u64 vaddr_in,
- const u64 length,
- const u32 access_ctrl,
- const struct ipz_pd pd,
- const u64 mr_addr_cb,
- struct ehca_mr_hipzout_parms *outparms)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
- adapter_handle.handle, /* r4 */
- mr->ipz_mr_handle.handle, /* r5 */
- vaddr_in, /* r6 */
- length, /* r7 */
- /* r8 */
- ((((u64)access_ctrl) << 32ULL) | pd.value),
- mr_addr_cb, /* r9 */
- 0, 0, 0);
- outparms->vaddr = outs[1];
- outparms->lkey = (u32)outs[2];
- outparms->rkey = (u32)outs[3];
-
- return ret;
-}
-
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const struct ehca_mr *orig_mr,
- const u64 vaddr_in,
- const u32 access_ctrl,
- const struct ipz_pd pd,
- struct ehca_mr_hipzout_parms *outparms)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
- adapter_handle.handle, /* r4 */
- orig_mr->ipz_mr_handle.handle, /* r5 */
- vaddr_in, /* r6 */
- (((u64)access_ctrl) << 32ULL), /* r7 */
- pd.value, /* r8 */
- 0, 0, 0, 0);
- outparms->handle.handle = outs[0];
- outparms->lkey = (u32)outs[2];
- outparms->rkey = (u32)outs[3];
-
- return ret;
-}
-
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mw *mw,
- const struct ipz_pd pd,
- struct ehca_mw_hipzout_parms *outparms)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
- adapter_handle.handle, /* r4 */
- 6, /* r5 */
- pd.value, /* r6 */
- 0, 0, 0, 0, 0, 0);
- outparms->handle.handle = outs[0];
- outparms->rkey = (u32)outs[3];
-
- return ret;
-}
-
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mw *mw,
- struct ehca_mw_hipzout_parms *outparms)
-{
- u64 ret;
- unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
- ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
- adapter_handle.handle, /* r4 */
- mw->ipz_mw_handle.handle, /* r5 */
- 0, 0, 0, 0, 0, 0, 0);
- outparms->rkey = (u32)outs[3];
-
- return ret;
-}
-
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mw *mw)
-{
- return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- mw->ipz_mw_handle.handle, /* r5 */
- 0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
- const u64 ressource_handle,
- void *rblock,
- unsigned long *byte_count)
-{
- u64 r_cb = __pa(rblock);
-
- if (r_cb & (EHCA_PAGESIZE-1)) {
- ehca_gen_err("rblock not page aligned.");
- return H_PARAMETER;
- }
-
- return ehca_plpar_hcall_norets(H_ERROR_DATA,
- adapter_handle.handle,
- ressource_handle,
- r_cb,
- 0, 0, 0, 0);
-}
-
-u64 hipz_h_eoi(int irq)
-{
- unsigned long xirr;
-
- iosync();
- xirr = (0xffULL << 24) | irq;
-
- return plpar_hcall_norets(H_EOI, xirr);
-}
diff --git a/drivers/staging/rdma/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
deleted file mode 100644
index a46e514c367b..000000000000
--- a/drivers/staging/rdma/ehca/hcp_if.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Firmware Infiniband Interface code for POWER
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Gerd Bayer <gerd.bayer@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_IF_H__
-#define __HCP_IF_H__
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "hipz_hw.h"
-
-/*
- * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
- * resources and creates the empty EQPT (ring).
- */
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_pfeq *pfeq,
- const u32 neq_control,
- const u32 number_of_entries,
- struct ipz_eq_handle *eq_handle,
- u32 * act_nr_of_entries,
- u32 * act_pages,
- u32 * eq_ist);
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
- struct ipz_eq_handle eq_handle,
- const u64 event_mask);
-/*
- * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
- * resources and creates the empty CQPT (ring).
- */
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_cq *cq,
- struct ehca_alloc_cq_parms *param);
-
-
-/*
- * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
- * initializes resources and creates empty QPPTs (2 rings).
- */
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_alloc_qp_parms *parms, int is_user);
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
- const u8 port_id,
- struct hipz_query_port *query_port_response_block);
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
- const u8 port_id, const u32 port_cap,
- const u8 init_type, const int modify_mask);
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
- struct hipz_query_hca *query_hca_rblock);
-
-/*
- * hipz_h_register_rpage is the internal function in hcp_if.h used by all
- * hcp_H_REGISTER_RPAGE calls.
- */
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
- const u8 pagesize,
- const u8 queue_type,
- const u64 resource_handle,
- const u64 logical_address_of_page,
- u64 count);
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_eq_handle eq_handle,
- struct ehca_pfeq *pfeq,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count);
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle
- hcp_adapter_handle,
- u32 ist);
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_cq_handle cq_handle,
- struct ehca_pfcq *pfcq,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count,
- const struct h_galpa gal);
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count,
- const struct h_galpa galpa);
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- void **log_addr_next_sq_wqe_tb_processed,
- void **log_addr_next_rq_wqe_tb_processed,
- int dis_and_get_function_code);
-enum hcall_sigt {
- HCALL_SIGT_NO_CQE = 0,
- HCALL_SIGT_BY_WQE = 1,
- HCALL_SIGT_EVERY = 2
-};
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- const u64 update_mask,
- struct hcp_modify_qp_control_block *mqpcb,
- struct h_galpa gal);
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct ehca_pfqp *pfqp,
- struct hcp_modify_qp_control_block *qqpcb,
- struct h_galpa gal);
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_qp *qp);
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u32 port);
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u32 port, u32 * pma_qp_nr,
- u32 * bma_qp_nr);
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u16 mcg_dlid,
- u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
- const struct ipz_qp_handle qp_handle,
- struct h_galpa gal,
- u16 mcg_dlid,
- u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_cq *cq,
- u8 force_flag);
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
- struct ehca_eq *eq);
-
-/*
- * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
- * initializes the resources.
- */
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const u64 vaddr,
- const u64 length,
- const u32 access_ctrl,
- const struct ipz_pd pd,
- struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const u8 pagesize,
- const u8 queue_type,
- const u64 logical_address_of_page,
- const u64 count);
-
-/* hipz_h_query_mr queries MR in HW and FW */
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mr frees MR resources in HW and FW */
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr);
-
-/* hipz_h_reregister_pmr reregisters MR in HW and FW */
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const u64 vaddr_in,
- const u64 length,
- const u32 access_ctrl,
- const struct ipz_pd pd,
- const u64 mr_addr_cb,
- struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_smr registers a shared MR in HW and FW */
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mr *mr,
- const struct ehca_mr *orig_mr,
- const u64 vaddr_in,
- const u32 access_ctrl,
- const struct ipz_pd pd,
- struct ehca_mr_hipzout_parms *outparms);
-
-/*
- * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
- * initializes the resources.
- */
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mw *mw,
- const struct ipz_pd pd,
- struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_query_mw queries MW in HW and FW */
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mw *mw,
- struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mw frees MW resources in HW and FW */
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
- const struct ehca_mw *mw);
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
- const u64 ressource_handle,
- void *rblock,
- unsigned long *byte_count);
-u64 hipz_h_eoi(int irq);
-
-#endif /* __HCP_IF_H__ */
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
deleted file mode 100644
index 077376ff3d28..000000000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * load store abstraction for ehca register access with tracing
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-u64 hcall_map_page(u64 physaddr)
-{
- return (u64)ioremap(physaddr, EHCA_PAGESIZE);
-}
-
-int hcall_unmap_page(u64 mapaddr)
-{
- iounmap((volatile void __iomem *) mapaddr);
- return 0;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
- u64 paddr_kernel, u64 paddr_user)
-{
- if (!is_user) {
- galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
- if (!galpas->kernel.fw_handle)
- return -ENOMEM;
- } else
- galpas->kernel.fw_handle = 0;
-
- galpas->user.fw_handle = paddr_user;
-
- return 0;
-}
-
-int hcp_galpas_dtor(struct h_galpas *galpas)
-{
- if (galpas->kernel.fw_handle) {
- int ret = hcall_unmap_page(galpas->kernel.fw_handle);
- if (ret)
- return ret;
- }
-
- galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
-
- return 0;
-}
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
deleted file mode 100644
index d1b029910249..000000000000
--- a/drivers/staging/rdma/ehca/hcp_phyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * Firmware calls
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Waleri Fomin <fomin@de.ibm.com>
- * Gerd Bayer <gerd.bayer@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_PHYP_H__
-#define __HCP_PHYP_H__
-
-
-/*
- * eHCA page (mapped into memory)
- * resource to access eHCA register pages in CPU address space
- */
-struct h_galpa {
- u64 fw_handle;
- /* for pSeries this is a 64bit memory address where
- I/O memory is mapped into CPU address space (kv) */
-};
-
-/*
- * resource to access eHCA address space registers, all types
- */
-struct h_galpas {
-	u32 pid;		/* PID of userspace galpa checking */
- struct h_galpa user; /* user space accessible resource,
- set to 0 if unused */
- struct h_galpa kernel; /* kernel space accessible resource,
- set to 0 if unused */
-};
-
-static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
-{
- u64 addr = galpa.fw_handle + offset;
- return *(volatile u64 __force *)addr;
-}
-
-static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
-{
- u64 addr = galpa.fw_handle + offset;
- *(volatile u64 __force *)addr = value;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
- u64 paddr_kernel, u64 paddr_user);
-
-int hcp_galpas_dtor(struct h_galpas *galpas);
-
-u64 hcall_map_page(u64 physaddr);
-
-int hcall_unmap_page(u64 mapaddr);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
deleted file mode 100644
index 9dac93d02140..000000000000
--- a/drivers/staging/rdma/ehca/hipz_fns.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * HW abstraction register functions
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_H__
-#define __HIPZ_FNS_H__
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-#include "hipz_fns_core.h"
-
-#define hipz_galpa_store_eq(gal, offset, value) \
- hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_eq(gal, offset) \
- hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qped(gal, offset, value) \
- hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_qped(gal, offset) \
- hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
-
-#define hipz_galpa_store_mrmw(gal, offset, value) \
- hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_mrmw(gal, offset) \
- hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
deleted file mode 100644
index 868735fd3187..000000000000
--- a/drivers/staging/rdma/ehca/hipz_fns_core.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * HW abstraction register functions
- *
- * Authors: Christoph Raisch <raisch@de.ibm.com>
- * Heiko J Schick <schickhj@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_CORE_H__
-#define __HIPZ_FNS_CORE_H__
-
-#include "hcp_phyp.h"
-#include "hipz_hw.h"
-
-#define hipz_galpa_store_cq(gal, offset, value) \
- hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_cq(gal, offset) \
- hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qp(gal, offset, value) \
- hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
-#define hipz_galpa_load_qp(gal, offset) \
- hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
-
-static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
-{
- /* ringing doorbell :-) */
- hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
- EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
-}
-
-static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
-{
- /* ringing doorbell :-) */
- hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
- EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
-}
-
-static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
-{
- hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
- EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
-}
-
-static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
-{
- u64 cqx_n0_reg;
-
- hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
- EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
- value));
- cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
-}
-
-static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
-{
- u64 cqx_n1_reg;
-
- hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
- EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
- cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
-}
-
-#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/staging/rdma/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
deleted file mode 100644
index bf996c7acc42..000000000000
--- a/drivers/staging/rdma/ehca/hipz_hw.h
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * eHCA register definitions
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_HW_H__
-#define __HIPZ_HW_H__
-
-#include "ehca_tools.h"
-
-#define EHCA_MAX_MTU 4
-
-/* QP Table Entry Memory Map */
-struct hipz_qptemm {
- u64 qpx_hcr;
- u64 qpx_c;
- u64 qpx_herr;
- u64 qpx_aer;
-/* 0x20*/
- u64 qpx_sqa;
- u64 qpx_sqc;
- u64 qpx_rqa;
- u64 qpx_rqc;
-/* 0x40*/
- u64 qpx_st;
- u64 qpx_pmstate;
- u64 qpx_pmfa;
- u64 qpx_pkey;
-/* 0x60*/
- u64 qpx_pkeya;
- u64 qpx_pkeyb;
- u64 qpx_pkeyc;
- u64 qpx_pkeyd;
-/* 0x80*/
- u64 qpx_qkey;
- u64 qpx_dqp;
- u64 qpx_dlidp;
- u64 qpx_portp;
-/* 0xa0*/
- u64 qpx_slidp;
- u64 qpx_slidpp;
- u64 qpx_dlida;
- u64 qpx_porta;
-/* 0xc0*/
- u64 qpx_slida;
- u64 qpx_slidpa;
- u64 qpx_slvl;
- u64 qpx_ipd;
-/* 0xe0*/
- u64 qpx_mtu;
- u64 qpx_lato;
- u64 qpx_rlimit;
- u64 qpx_rnrlimit;
-/* 0x100*/
- u64 qpx_t;
- u64 qpx_sqhp;
- u64 qpx_sqptp;
- u64 qpx_nspsn;
-/* 0x120*/
- u64 qpx_nspsnhwm;
- u64 reserved1;
- u64 qpx_sdsi;
- u64 qpx_sdsbc;
-/* 0x140*/
- u64 qpx_sqwsize;
- u64 qpx_sqwts;
- u64 qpx_lsn;
- u64 qpx_nssn;
-/* 0x160 */
- u64 qpx_mor;
- u64 qpx_cor;
- u64 qpx_sqsize;
- u64 qpx_erc;
-/* 0x180*/
- u64 qpx_rnrrc;
- u64 qpx_ernrwt;
- u64 qpx_rnrresp;
- u64 qpx_lmsna;
-/* 0x1a0 */
- u64 qpx_sqhpc;
- u64 qpx_sqcptp;
- u64 qpx_sigt;
- u64 qpx_wqecnt;
-/* 0x1c0*/
- u64 qpx_rqhp;
- u64 qpx_rqptp;
- u64 qpx_rqsize;
- u64 qpx_nrr;
-/* 0x1e0*/
- u64 qpx_rdmac;
- u64 qpx_nrpsn;
- u64 qpx_lapsn;
- u64 qpx_lcr;
-/* 0x200*/
- u64 qpx_rwc;
- u64 qpx_rwva;
- u64 qpx_rdsi;
- u64 qpx_rdsbc;
-/* 0x220*/
- u64 qpx_rqwsize;
- u64 qpx_crmsn;
- u64 qpx_rdd;
- u64 qpx_larpsn;
-/* 0x240*/
- u64 qpx_pd;
- u64 qpx_scqn;
- u64 qpx_rcqn;
- u64 qpx_aeqn;
-/* 0x260*/
- u64 qpx_aaelog;
- u64 qpx_ram;
- u64 qpx_rdmaqe0;
- u64 qpx_rdmaqe1;
-/* 0x280*/
- u64 qpx_rdmaqe2;
- u64 qpx_rdmaqe3;
- u64 qpx_nrpsnhwm;
-/* 0x298*/
- u64 reserved[(0x400 - 0x298) / 8];
-/* 0x400 extended data */
- u64 reserved_ext[(0x500 - 0x400) / 8];
-/* 0x500 */
- u64 reserved2[(0x1000 - 0x500) / 8];
-/* 0x1000 */
-};
-
-#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
-
-#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
-
-/* MRMWPT Entry Memory Map */
-struct hipz_mrmwmm {
- /* 0x00 */
- u64 mrx_hcr;
-
- u64 mrx_c;
- u64 mrx_herr;
- u64 mrx_aer;
- /* 0x20 */
- u64 mrx_pp;
- u64 reserved1;
- u64 reserved2;
- u64 reserved3;
- /* 0x40 */
- u64 reserved4[(0x200 - 0x40) / 8];
- /* 0x200 */
- u64 mrx_ctl[64];
-
-};
-
-#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
-
-struct hipz_qpedmm {
- /* 0x00 */
- u64 reserved0[(0x400) / 8];
- /* 0x400 */
- u64 qpedx_phh;
- u64 qpedx_ppsgp;
- /* 0x410 */
- u64 qpedx_ppsgu;
- u64 qpedx_ppdgp;
- /* 0x420 */
- u64 qpedx_ppdgu;
- u64 qpedx_aph;
- /* 0x430 */
- u64 qpedx_apsgp;
- u64 qpedx_apsgu;
- /* 0x440 */
- u64 qpedx_apdgp;
- u64 qpedx_apdgu;
- /* 0x450 */
- u64 qpedx_apav;
- u64 qpedx_apsav;
- /* 0x460 */
- u64 qpedx_hcr;
- u64 reserved1[4];
- /* 0x488 */
- u64 qpedx_rrl0;
- /* 0x490 */
- u64 qpedx_rrrkey0;
- u64 qpedx_rrva0;
- /* 0x4a0 */
- u64 reserved2;
- u64 qpedx_rrl1;
- /* 0x4b0 */
- u64 qpedx_rrrkey1;
- u64 qpedx_rrva1;
- /* 0x4c0 */
- u64 reserved3;
- u64 qpedx_rrl2;
- /* 0x4d0 */
- u64 qpedx_rrrkey2;
- u64 qpedx_rrva2;
- /* 0x4e0 */
- u64 reserved4;
- u64 qpedx_rrl3;
- /* 0x4f0 */
- u64 qpedx_rrrkey3;
- u64 qpedx_rrva3;
-};
-
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
-
-/* CQ Table Entry Memory Map */
-struct hipz_cqtemm {
- u64 cqx_hcr;
- u64 cqx_c;
- u64 cqx_herr;
- u64 cqx_aer;
-/* 0x20 */
- u64 cqx_ptp;
- u64 cqx_tp;
- u64 cqx_fec;
- u64 cqx_feca;
-/* 0x40 */
- u64 cqx_ep;
- u64 cqx_eq;
-/* 0x50 */
- u64 reserved1;
- u64 cqx_n0;
-/* 0x60 */
- u64 cqx_n1;
- u64 reserved2[(0x1000 - 0x60) / 8];
-/* 0x1000 */
-};
-
-#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
-#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
-#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-
-#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
-
-/* EQ Table Entry Memory Map */
-struct hipz_eqtemm {
- u64 eqx_hcr;
- u64 eqx_c;
-
- u64 eqx_herr;
- u64 eqx_aer;
-/* 0x20 */
- u64 eqx_ptp;
- u64 eqx_tp;
- u64 eqx_ssba;
- u64 eqx_psba;
-
-/* 0x40 */
- u64 eqx_cec;
- u64 eqx_meql;
- u64 eqx_xisbi;
- u64 eqx_xisc;
-/* 0x60 */
- u64 eqx_it;
-
-};
-
-#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
-
-/* access control defines for MR/MW */
-#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
-#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000
-#define HIPZ_ACCESSCTRL_R_READ 0x00200000
-#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
-#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000
-
-/* query hca response block */
-struct hipz_query_hca {
- u32 cur_reliable_dg;
- u32 cur_qp;
- u32 cur_cq;
- u32 cur_eq;
- u32 cur_mr;
- u32 cur_mw;
- u32 cur_ee_context;
- u32 cur_mcast_grp;
- u32 cur_qp_attached_mcast_grp;
- u32 reserved1;
- u32 cur_ipv6_qp;
- u32 cur_eth_qp;
- u32 cur_hp_mr;
- u32 reserved2[3];
- u32 max_rd_domain;
- u32 max_qp;
- u32 max_cq;
- u32 max_eq;
- u32 max_mr;
- u32 max_hp_mr;
- u32 max_mw;
- u32 max_mrwpte;
- u32 max_special_mrwpte;
- u32 max_rd_ee_context;
- u32 max_mcast_grp;
- u32 max_total_mcast_qp_attach;
- u32 max_mcast_qp_attach;
- u32 max_raw_ipv6_qp;
- u32 max_raw_ethy_qp;
- u32 internal_clock_frequency;
- u32 max_pd;
- u32 max_ah;
- u32 max_cqe;
- u32 max_wqes_wq;
- u32 max_partitions;
- u32 max_rr_ee_context;
- u32 max_rr_qp;
- u32 max_rr_hca;
- u32 max_act_wqs_ee_context;
- u32 max_act_wqs_qp;
- u32 max_sge;
- u32 max_sge_rd;
- u32 memory_page_size_supported;
- u64 max_mr_size;
- u32 local_ca_ack_delay;
- u32 num_ports;
- u32 vendor_id;
- u32 vendor_part_id;
- u32 hw_ver;
- u64 node_guid;
- u64 hca_cap_indicators;
- u32 data_counter_register_size;
- u32 max_shared_rq;
- u32 max_isns_eq;
- u32 max_neq;
-} __attribute__ ((packed));
-
-#define HCA_CAP_AH_PORT_NR_CHECK EHCA_BMASK_IBM( 0, 0)
-#define HCA_CAP_ATOMIC EHCA_BMASK_IBM( 1, 1)
-#define HCA_CAP_AUTO_PATH_MIG EHCA_BMASK_IBM( 2, 2)
-#define HCA_CAP_BAD_P_KEY_CTR EHCA_BMASK_IBM( 3, 3)
-#define HCA_CAP_SQD_RTS_PORT_CHANGE EHCA_BMASK_IBM( 4, 4)
-#define HCA_CAP_CUR_QP_STATE_MOD EHCA_BMASK_IBM( 5, 5)
-#define HCA_CAP_INIT_TYPE EHCA_BMASK_IBM( 6, 6)
-#define HCA_CAP_PORT_ACTIVE_EVENT EHCA_BMASK_IBM( 7, 7)
-#define HCA_CAP_Q_KEY_VIOL_CTR EHCA_BMASK_IBM( 8, 8)
-#define HCA_CAP_WQE_RESIZE EHCA_BMASK_IBM( 9, 9)
-#define HCA_CAP_RAW_PACKET_MCAST EHCA_BMASK_IBM(10, 10)
-#define HCA_CAP_SHUTDOWN_PORT EHCA_BMASK_IBM(11, 11)
-#define HCA_CAP_RC_LL_QP EHCA_BMASK_IBM(12, 12)
-#define HCA_CAP_SRQ EHCA_BMASK_IBM(13, 13)
-#define HCA_CAP_UD_LL_QP EHCA_BMASK_IBM(16, 16)
-#define HCA_CAP_RESIZE_MR EHCA_BMASK_IBM(17, 17)
-#define HCA_CAP_MINI_QP EHCA_BMASK_IBM(18, 18)
-#define HCA_CAP_H_ALLOC_RES_SYNC EHCA_BMASK_IBM(19, 19)
-
-/* query port response block */
-struct hipz_query_port {
- u32 state;
- u32 bad_pkey_cntr;
- u32 lmc;
- u32 lid;
- u32 subnet_timeout;
- u32 qkey_viol_cntr;
- u32 sm_sl;
- u32 sm_lid;
- u32 capability_mask;
- u32 init_type_reply;
- u32 pkey_tbl_len;
- u32 gid_tbl_len;
- u64 gid_prefix;
- u32 port_nr;
- u16 pkey_entries[16];
- u8 reserved1[32];
- u32 trent_size;
- u32 trbuf_size;
- u64 max_msg_sz;
- u32 max_mtu;
- u32 vl_cap;
- u32 phys_pstate;
- u32 phys_state;
- u32 phys_speed;
- u32 phys_width;
- u8 reserved2[1884];
- u64 guid_entries[255];
-} __attribute__ ((packed));
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
deleted file mode 100644
index 7ffc748cb973..000000000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * internal queue handling
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ipz_pt_fn.h"
-#include "ehca_classes.h"
-
-#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
-
-struct kmem_cache *small_qp_cache;
-
-void *ipz_qpageit_get_inc(struct ipz_queue *queue)
-{
- void *ret = ipz_qeit_get(queue);
- queue->current_q_offset += queue->pagesize;
- if (queue->current_q_offset > queue->queue_length) {
- queue->current_q_offset -= queue->pagesize;
- ret = NULL;
- }
- if (((u64)ret) % queue->pagesize) {
- ehca_gen_err("ERROR!! not at PAGE-Boundary");
- return NULL;
- }
- return ret;
-}
-
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
-{
- void *ret = ipz_qeit_get(queue);
- u64 last_entry_in_q = queue->queue_length - queue->qe_size;
-
- queue->current_q_offset += queue->qe_size;
- if (queue->current_q_offset > last_entry_in_q) {
- queue->current_q_offset = 0;
- queue->toggle_state = (~queue->toggle_state) & 1;
- }
-
- return ret;
-}
-
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
-{
- int i;
- for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
- u64 page = __pa(queue->queue_pages[i]);
- if (addr >= page && addr < page + queue->pagesize) {
- *q_offset = addr - page + i * queue->pagesize;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-#if PAGE_SHIFT < EHCA_PAGESHIFT
-#error Kernel pages must be at least as large as eHCA pages (4K)!
-#endif
-
-/*
- * allocate pages for queue:
- * outer loop allocates whole kernel pages (page aligned) and
- * inner loop divides a kernel page into smaller hca queue pages
- */
-static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
-{
- int k, f = 0;
- u8 *kpage;
-
- while (f < nr_of_pages) {
- kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
- if (!kpage)
- goto out;
-
- for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
- queue->queue_pages[f] = (struct ipz_page *)kpage;
- kpage += EHCA_PAGESIZE;
- f++;
- }
- }
- return 1;
-
-out:
- for (f = 0; f < nr_of_pages && queue->queue_pages[f];
- f += PAGES_PER_KPAGE)
- free_page((unsigned long)(queue->queue_pages)[f]);
- return 0;
-}
-
-static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
- int order = ilog2(queue->pagesize) - 9;
- struct ipz_small_queue_page *page;
- unsigned long bit;
-
- mutex_lock(&pd->lock);
-
- if (!list_empty(&pd->free[order]))
- page = list_entry(pd->free[order].next,
- struct ipz_small_queue_page, list);
- else {
- page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
- if (!page)
- goto out;
-
- page->page = get_zeroed_page(GFP_KERNEL);
- if (!page->page) {
- kmem_cache_free(small_qp_cache, page);
- goto out;
- }
-
- list_add(&page->list, &pd->free[order]);
- }
-
- bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
- __set_bit(bit, page->bitmap);
- page->fill++;
-
- if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
- list_move(&page->list, &pd->full[order]);
-
- mutex_unlock(&pd->lock);
-
- queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
- queue->small_page = page;
- queue->offset = bit << (order + 9);
- return 1;
-
-out:
- ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
- mutex_unlock(&pd->lock);
- return 0;
-}
-
-static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
- int order = ilog2(queue->pagesize) - 9;
- struct ipz_small_queue_page *page = queue->small_page;
- unsigned long bit;
- int free_page = 0;
-
- bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
- >> (order + 9);
-
- mutex_lock(&pd->lock);
-
- __clear_bit(bit, page->bitmap);
- page->fill--;
-
- if (page->fill == 0) {
- list_del(&page->list);
- free_page = 1;
- }
-
- if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
- /* the page was full until we freed the chunk */
- list_move_tail(&page->list, &pd->free[order]);
-
- mutex_unlock(&pd->lock);
-
- if (free_page) {
- free_page(page->page);
- kmem_cache_free(small_qp_cache, page);
- }
-}
-
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
- const u32 nr_of_pages, const u32 pagesize,
- const u32 qe_size, const u32 nr_of_sg,
- int is_small)
-{
- if (pagesize > PAGE_SIZE) {
- ehca_gen_err("FATAL ERROR: pagesize=%x "
- "is greater than kernel page size", pagesize);
- return 0;
- }
-
- /* init queue fields */
- queue->queue_length = nr_of_pages * pagesize;
- queue->pagesize = pagesize;
- queue->qe_size = qe_size;
- queue->act_nr_of_sg = nr_of_sg;
- queue->current_q_offset = 0;
- queue->toggle_state = 1;
- queue->small_page = NULL;
-
- /* allocate queue page pointers */
- queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
- GFP_KERNEL | __GFP_NOWARN);
- if (!queue->queue_pages) {
- queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
- if (!queue->queue_pages) {
- ehca_gen_err("Couldn't allocate queue page list");
- return 0;
- }
- }
-
- /* allocate actual queue pages */
- if (is_small) {
- if (!alloc_small_queue_page(queue, pd))
- goto ipz_queue_ctor_exit0;
- } else
- if (!alloc_queue_pages(queue, nr_of_pages))
- goto ipz_queue_ctor_exit0;
-
- return 1;
-
-ipz_queue_ctor_exit0:
- ehca_gen_err("Couldn't alloc pages queue=%p "
- "nr_of_pages=%x", queue, nr_of_pages);
- kvfree(queue->queue_pages);
-
- return 0;
-}
-
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
-{
- int i, nr_pages;
-
- if (!queue || !queue->queue_pages) {
- ehca_gen_dbg("queue or queue_pages is NULL");
- return 0;
- }
-
- if (queue->small_page)
- free_small_queue_page(queue, pd);
- else {
- nr_pages = queue->queue_length / queue->pagesize;
- for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
- free_page((unsigned long)queue->queue_pages[i]);
- }
-
- kvfree(queue->queue_pages);
-
- return 1;
-}
-
-int ehca_init_small_qp_cache(void)
-{
- small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
- sizeof(struct ipz_small_queue_page),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!small_qp_cache)
- return -ENOMEM;
-
- return 0;
-}
-
-void ehca_cleanup_small_qp_cache(void)
-{
- kmem_cache_destroy(small_qp_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
deleted file mode 100644
index a801274ea337..000000000000
--- a/drivers/staging/rdma/ehca/ipz_pt_fn.h
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- * internal queue handling
- *
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Reinhard Ernst <rernst@de.ibm.com>
- * Christoph Raisch <raisch@de.ibm.com>
- *
- * Copyright (c) 2005 IBM Corporation
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __IPZ_PT_FN_H__
-#define __IPZ_PT_FN_H__
-
-#define EHCA_PAGESHIFT 12
-#define EHCA_PAGESIZE 4096UL
-#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
-#define EHCA_PT_ENTRIES 512UL
-
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-
-struct ehca_pd;
-struct ipz_small_queue_page;
-
-extern struct kmem_cache *small_qp_cache;
-
-/* struct generic ehca page */
-struct ipz_page {
- u8 entries[EHCA_PAGESIZE];
-};
-
-#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
-
-struct ipz_small_queue_page {
- unsigned long page;
- unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
- int fill;
- void *mapped_addr;
- u32 mmap_count;
- struct list_head list;
-};
-
-/* struct generic queue in linux kernel virtual memory (kv) */
-struct ipz_queue {
- u64 current_q_offset; /* current queue entry */
-
- struct ipz_page **queue_pages; /* array of pages belonging to queue */
- u32 qe_size; /* queue entry size */
- u32 act_nr_of_sg;
- u32 queue_length; /* queue length allocated in bytes */
- u32 pagesize;
- u32 toggle_state; /* toggle flag - per page */
- u32 offset; /* save offset within page for small_qp */
- struct ipz_small_queue_page *small_page;
-};
-
-/*
- * return current Queue Entry for a certain q_offset
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
-{
- struct ipz_page *current_page;
- if (q_offset >= queue->queue_length)
- return NULL;
- current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
- return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
-}
-
-/*
- * return current Queue Entry
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_get(struct ipz_queue *queue)
-{
- return ipz_qeit_calc(queue, queue->current_q_offset);
-}
-
-/*
- * return current Queue Page , increment Queue Page iterator from
- * page to page in struct ipz_queue, last increment will return 0! and
- * NOT wrap
- * returns address (kv) of Queue Page
- * warning don't use in parallel with ipz_QE_get_inc()
- */
-void *ipz_qpageit_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
-{
- void *ret = ipz_qeit_get(queue);
- queue->current_q_offset += queue->qe_size;
- if (queue->current_q_offset >= queue->queue_length) {
- queue->current_q_offset = 0;
- /* toggle the valid flag */
- queue->toggle_state = (~queue->toggle_state) & 1;
- }
-
- return ret;
-}
-
-/*
- * return a bool indicating whether current Queue Entry is valid
- */
-static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
-{
- struct ehca_cqe *cqe = ipz_qeit_get(queue);
- return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
-}
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
-{
- return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
-}
-
-/*
- * returns and resets Queue Entry iterator
- * returns address (kv) of first Queue Entry
- */
-static inline void *ipz_qeit_reset(struct ipz_queue *queue)
-{
- queue->current_q_offset = 0;
- return ipz_qeit_get(queue);
-}
-
-/*
- * return the q_offset corresponding to an absolute address
- */
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
-
-/*
- * return the next queue offset. don't modify the queue.
- */
-static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
-{
- offset += queue->qe_size;
- if (offset >= queue->queue_length) offset = 0;
- return offset;
-}
-
-/* struct generic page table */
-struct ipz_pt {
- u64 entries[EHCA_PT_ENTRIES];
-};
-
-/* struct page table for a queue, only to be used in pf */
-struct ipz_qpt {
- /* queue page tables (kv), use u64 because we know the element length */
- u64 *qpts;
- u32 n_qpts;
- u32 n_ptes; /* number of page table entries */
- u64 *current_pte_addr;
-};
-
-/*
- * constructor for a ipz_queue_t, placement new for ipz_queue_t,
- * new for all dependent data structures
- * all QP Tables are the same
- * flow:
- * allocate+pin queue
- * see ipz_qpt_ctor()
- * returns true if ok, false if out of memory
- */
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
- const u32 nr_of_pages, const u32 pagesize,
- const u32 qe_size, const u32 nr_of_sg,
- int is_small);
-
-/*
- * destructor for a ipz_queue_t
- * -# free queue
- * see ipz_queue_ctor()
- * returns true if ok, false if queue was NULL-ptr or free failed
- */
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
-
-/*
- * constructor for a ipz_qpt_t,
- * placement new for struct ipz_queue, new for all dependent data structures
- * all QP Tables are the same,
- * flow:
- * -# allocate+pin queue
- * -# initialise ptcb
- * -# allocate+pin PTs
- * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
- * -# the ring must have room for exactly nr_of_PTEs
- * see ipz_qpt_ctor()
- */
-void ipz_qpt_ctor(struct ipz_qpt *qpt,
- const u32 nr_of_qes,
- const u32 pagesize,
- const u32 qe_size,
- const u8 lowbyte, const u8 toggle,
- u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- * fix EQ page problems
- */
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Event Queue Entry, increment Queue Entry iterator
- * by one step in struct ipz_queue if valid, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_queue_QPageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- */
-static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
-{
- void *ret = ipz_qeit_get(queue);
- u32 qe = *(u8 *)ret;
- if ((qe >> 7) != (queue->toggle_state & 1))
- return NULL;
- ipz_qeit_eq_get_inc(queue); /* this is a good one */
- return ret;
-}
-
-static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
-{
- void *ret = ipz_qeit_get(queue);
- u32 qe = *(u8 *)ret;
- if ((qe >> 7) != (queue->toggle_state & 1))
- return NULL;
- return ret;
-}
-
-/* returns address (GX) of first queue entry */
-static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
-{
- return be64_to_cpu(qpt->qpts[0]);
-}
-
-/* returns address (kv) of first page of queue page table */
-static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
-{
- return qpt->qpts;
-}
-
-#endif /* __IPZ_PT_FN_H__ */
diff --git a/drivers/staging/rdma/ipath/Kconfig b/drivers/staging/rdma/ipath/Kconfig
deleted file mode 100644
index 041ce0634968..000000000000
--- a/drivers/staging/rdma/ipath/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config INFINIBAND_IPATH
- tristate "QLogic HTX HCA support"
- depends on 64BIT && NET && HT_IRQ
- ---help---
- This is a driver for the deprecated QLogic Hyper-Transport
- IB host channel adapter (model QHT7140),
- including InfiniBand verbs support. This driver allows these
- devices to be used with both kernel upper level protocols such
- as IP-over-InfiniBand as well as with userspace applications
- (in conjunction with InfiniBand userspace access).
- For QLogic PCIe QLE based cards, use the QIB driver instead.
-
- If you have this hardware you will need to boot with PAT disabled
- on your x86-64 systems; use the nopat kernel parameter.
-
- Note that this driver will soon be removed entirely from the kernel.
diff --git a/drivers/staging/rdma/ipath/Makefile b/drivers/staging/rdma/ipath/Makefile
deleted file mode 100644
index 4496f2820c92..000000000000
--- a/drivers/staging/rdma/ipath/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
- -DIPATH_KERN_TYPE=0
-
-obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
-
-ib_ipath-y := \
- ipath_cq.o \
- ipath_diag.o \
- ipath_dma.o \
- ipath_driver.o \
- ipath_eeprom.o \
- ipath_file_ops.o \
- ipath_fs.o \
- ipath_init_chip.o \
- ipath_intr.o \
- ipath_keys.o \
- ipath_mad.o \
- ipath_mmap.o \
- ipath_mr.o \
- ipath_qp.o \
- ipath_rc.o \
- ipath_ruc.o \
- ipath_sdma.o \
- ipath_srq.o \
- ipath_stats.o \
- ipath_sysfs.o \
- ipath_uc.o \
- ipath_ud.o \
- ipath_user_pages.o \
- ipath_user_sdma.o \
- ipath_verbs_mcast.o \
- ipath_verbs.o
-
-ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-
-ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
-ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/staging/rdma/ipath/TODO b/drivers/staging/rdma/ipath/TODO
deleted file mode 100644
index cb00158d64c8..000000000000
--- a/drivers/staging/rdma/ipath/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-The ipath driver has been moved to staging in preparation for its removal in a
-few releases. The driver will be deleted during the 4.6 merge window.
-
-Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
-Cc: linux-rdma@vger.kernel.org
diff --git a/drivers/staging/rdma/ipath/ipath_common.h b/drivers/staging/rdma/ipath/ipath_common.h
deleted file mode 100644
index 28cfe97cf1e9..000000000000
--- a/drivers/staging/rdma/ipath/ipath_common.h
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_COMMON_H
-#define _IPATH_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-
-/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
-#define IPATH_SRC_OUI_1 0x00
-#define IPATH_SRC_OUI_2 0x11
-#define IPATH_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _IPATH_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
- */
-
-/*
- * The value in the BTH QP field that InfiniPath uses to differentiate
- * an infinipath protocol IB packet vs standard IB transport
- */
-#define IPATH_KD_QP 0x656b79
-
-/*
- * valid states passed to ipath_set_linkstate() user call
- */
-#define IPATH_IB_LINKDOWN 0
-#define IPATH_IB_LINKARM 1
-#define IPATH_IB_LINKACTIVE 2
-#define IPATH_IB_LINKDOWN_ONLY 3
-#define IPATH_IB_LINKDOWN_SLEEP 4
-#define IPATH_IB_LINKDOWN_DISABLE 5
-#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
-#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
-#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
-#define IPATH_IB_LINK_HRTBT 9 /* enable heartbeat, normal, non-loopback */
-
-/*
- * These 3 values (SDR and DDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the possible values for ipath_link_speed_enabled and active
- * The values were chosen to match values used within the IB spec.
- */
-#define IPATH_IB_SDR 1
-#define IPATH_IB_DDR 2
-
-/*
- * stats maintained by the driver. For now, at least, this is global
- * to all minor devices.
- */
-struct infinipath_stats {
- /* number of interrupts taken */
- __u64 sps_ints;
- /* number of interrupts for errors */
- __u64 sps_errints;
- /* number of errors from chip (not incl. packet errors or CRC) */
- __u64 sps_errs;
- /* number of packet errors from chip other than CRC */
- __u64 sps_pkterrs;
- /* number of packets with CRC errors (ICRC and VCRC) */
- __u64 sps_crcerrs;
- /* number of hardware errors reported (parity, etc.) */
- __u64 sps_hwerrs;
- /* number of times IB link changed state unexpectedly */
- __u64 sps_iblink;
- __u64 sps_unused; /* was fastrcvint, no longer implemented */
- /* number of kernel (port0) packets received */
- __u64 sps_port0pkts;
- /* number of "ethernet" packets sent by driver */
- __u64 sps_ether_spkts;
- /* number of "ethernet" packets received by driver */
- __u64 sps_ether_rpkts;
- /* number of SMA packets sent by driver. Obsolete. */
- __u64 sps_sma_spkts;
- /* number of SMA packets received by driver. Obsolete. */
- __u64 sps_sma_rpkts;
- /* number of times all ports rcvhdrq was full and packet dropped */
- __u64 sps_hdrqfull;
- /* number of times all ports egrtid was full and packet dropped */
- __u64 sps_etidfull;
- /*
- * number of times we tried to send from driver, but no pio buffers
- * avail
- */
- __u64 sps_nopiobufs;
- /* number of ports currently open */
- __u64 sps_ports;
- /* list of pkeys (other than default) accepted (0 means not set) */
- __u16 sps_pkeys[4];
- __u16 sps_unused16[4]; /* available; maintaining compatible layout */
- /* number of user ports per chip (not IB ports) */
- __u32 sps_nports;
- /* not our interrupt, or already handled */
- __u32 sps_nullintr;
- /* max number of packets handled per receive call */
- __u32 sps_maxpkts_call;
- /* avg number of packets handled per receive call */
- __u32 sps_avgpkts_call;
- /* total number of pages locked */
- __u64 sps_pagelocks;
- /* total number of pages unlocked */
- __u64 sps_pageunlocks;
- /*
- * Number of packets dropped in kernel other than errors (ether
- * packets if ipath not configured, etc.)
- */
- __u64 sps_krdrops;
- __u64 sps_txeparity; /* PIO buffer parity error, recovered */
- /* pad for future growth */
- __u64 __sps_pad[45];
-};
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file.
- */
-#define IPATH_STATUS_INITTED 0x1 /* basic initialization done */
-#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
-/* Device has been disabled via admin request */
-#define IPATH_STATUS_ADMIN_DISABLED 0x4
-/* Chip has been found and initted */
-#define IPATH_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define IPATH_STATUS_IB_READY 0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define IPATH_STATUS_IB_CONF 0x80
-/* no link established, probably no cable */
-#define IPATH_STATUS_IB_NOCABLE 0x100
-/* A Fatal hardware error has occurred. */
-#define IPATH_STATUS_HWERROR 0x200
-
-/*
- * The list of usermode accessible registers. Also see Reg_* later in file.
- */
-typedef enum _ipath_ureg {
- /* (RO) DMA RcvHdr to be used next. */
- ur_rcvhdrtail = 0,
- /* (RW) RcvHdr entry to be processed next by host. */
- ur_rcvhdrhead = 1,
- /* (RO) Index of next Eager index to use. */
- ur_rcvegrindextail = 2,
- /* (RW) Eager TID to be processed next */
- ur_rcvegrindexhead = 3,
- /* For internal use only; max register number. */
- _IPATH_UregMax
-} ipath_ureg;
-
-/* bit values for spi_runtime_flags */
-#define IPATH_RUNTIME_HT 0x1
-#define IPATH_RUNTIME_PCIE 0x2
-#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
-#define IPATH_RUNTIME_RCVHDR_COPY 0x8
-#define IPATH_RUNTIME_MASTER 0x10
-#define IPATH_RUNTIME_NODMA_RTAIL 0x80
-#define IPATH_RUNTIME_SDMA 0x200
-#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
-#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
-
-/*
- * This structure is returned by ipath_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets
- */
-struct ipath_base_info {
- /* version of hardware, for feature checking. */
- __u32 spi_hw_version;
- /* version of software, for feature checking. */
- __u32 spi_sw_version;
- /* InfiniPath port assigned, goes into sent packets */
- __u16 spi_port;
- __u16 spi_subport;
- /*
- * IB MTU, packets IB data must be less than this.
- * The MTU is in bytes, and will be a multiple of 4 bytes.
- */
- __u32 spi_mtu;
- /*
- * Size of a PIO buffer. Any given packet's total size must be less
- * than this (in words). Included is the starting control word, so
- * if 513 is returned, then total pkt size is 512 words or less.
- */
- __u32 spi_piosize;
- /* size of the TID cache in infinipath, in entries */
- __u32 spi_tidcnt;
- /* size of the TID Eager list in infinipath, in entries */
- __u32 spi_tidegrcnt;
- /* size of a single receive header queue entry in words. */
- __u32 spi_rcvhdrent_size;
- /*
- * Count of receive header queue entries allocated.
- * This may be less than the spu_rcvhdrcnt passed in.
- */
- __u32 spi_rcvhdr_cnt;
-
- /* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
- __u32 spi_runtime_flags;
-
- /* address where receive buffer queue is mapped into */
- __u64 spi_rcvhdr_base;
-
- /* user program. */
-
- /* base address of eager TID receive buffers. */
- __u64 spi_rcv_egrbufs;
-
- /* Allocated by initialization code, not by protocol. */
-
- /*
- * Size of each TID buffer in host memory, starting at
- * spi_rcv_egrbufs. The buffers are virtually contiguous.
- */
- __u32 spi_rcv_egrbufsize;
- /*
- * The special QP (queue pair) value that identifies an infinipath
- * protocol packet from standard IB packets. More, probably much
- * more, to be added.
- */
- __u32 spi_qpair;
-
- /*
- * User register base for init code, not to be used directly by
- * protocol or applications.
- */
- __u64 __spi_uregbase;
- /*
- * Maximum buffer size in bytes that can be used in a single TID
- * entry (assuming the buffer is aligned to this boundary). This is
- * the minimum of what the hardware and software support. Guaranteed
- * to be a power of 2.
- */
- __u32 spi_tid_maxsize;
- /*
- * alignment of each pio send buffer (byte count
- * to add to spi_piobufbase to get to second buffer)
- */
- __u32 spi_pioalign;
- /*
- * The index of the first pio buffer available to this process;
- * needed to do lookup in spi_pioavailaddr; not added to
- * spi_piobufbase.
- */
- __u32 spi_pioindex;
- /* number of buffers mapped for this process */
- __u32 spi_piocnt;
-
- /*
- * Base address of writeonly pio buffers for this process.
- * Each buffer has spi_piosize words, and is aligned on spi_pioalign
- * boundaries. spi_piocnt buffers are mapped from this address
- */
- __u64 spi_piobufbase;
-
- /*
- * Base address of readonly memory copy of the pioavail registers.
- * There are 2 bits for each buffer.
- */
- __u64 spi_pioavailaddr;
-
- /*
- * Address where driver updates a copy of the interface and driver
- * status (IPATH_STATUS_*) as a 64 bit value. It's followed by a
- * string indicating hardware error, if there was one.
- */
- __u64 spi_status;
-
- /* number of chip ports available to user processes */
- __u32 spi_nports;
- /* unit number of chip we are using */
- __u32 spi_unit;
- /* num bufs in each contiguous set */
- __u32 spi_rcv_egrperchunk;
- /* size in bytes of each contiguous set */
- __u32 spi_rcv_egrchunksize;
- /* total size of mmap to cover full rcvegrbuffers */
- __u32 spi_rcv_egrbuftotlen;
- __u32 spi_filler_for_align;
- /* address of readonly memory copy of the rcvhdrq tail register. */
- __u64 spi_rcvhdr_tailaddr;
-
- /* shared memory pages for subports if port is shared */
- __u64 spi_subport_uregbase;
- __u64 spi_subport_rcvegrbuf;
- __u64 spi_subport_rcvhdr_base;
-
- /* shared memory page for hardware port if it is shared */
- __u64 spi_port_uregbase;
- __u64 spi_port_rcvegrbuf;
- __u64 spi_port_rcvhdr_base;
- __u64 spi_port_rcvhdr_tailaddr;
-
-} __attribute__ ((aligned(8)));
-
-
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of ipath_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way. The driver must be the same or higher
- * for initialization to succeed. In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define IPATH_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if user software is larger
- * than driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define IPATH_USER_SWMINOR 6
-
-#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
-
-#define IPATH_KERN_TYPE 0
-
-/*
- * Similarly, this is the kernel version going back to the user. It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or is the driver from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of ipath_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
-
-/*
- * This structure is passed to ipath_userinit() to tell the driver where
- * user code buffers are, sizes, etc. The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility. It can
- * be extended, if userversion is changed so user code can tell, if needed
- */
-struct ipath_user_info {
- /*
- * version of user software, to detect compatibility issues.
- * Should be set to IPATH_USER_SWVERSION.
- */
- __u32 spu_userversion;
-
- /* desired number of receive header queue entries */
- __u32 spu_rcvhdrcnt;
-
- /* size of struct base_info to write to */
- __u32 spu_base_info_size;
-
- /*
- * number of words in KD protocol header
- * This tells InfiniPath how many words to copy to rcvhdrq. If 0,
- * kernel uses a default. Once set, attempts to set any other value
- * are an error (EAGAIN) until driver is reloaded.
- */
- __u32 spu_rcvhdrsize;
-
- /*
- * If two or more processes wish to share a port, each process
- * must set the spu_subport_cnt and spu_subport_id to the same
- * values. The only restriction on the spu_subport_id is that
- * it be unique for a given node.
- */
- __u16 spu_subport_cnt;
- __u16 spu_subport_id;
-
- __u32 spu_unused; /* kept for compatible layout */
-
- /*
- * address of struct base_info to write to
- */
- __u64 spu_base_info;
-
-} __attribute__ ((aligned(8)));
-
-/* User commands. */
-
-#define IPATH_CMD_MIN 16
-
-#define __IPATH_CMD_USER_INIT 16 /* old set up userspace (for old user code) */
-#define IPATH_CMD_PORT_INFO 17 /* find out what resources we got */
-#define IPATH_CMD_RECV_CTRL 18 /* control receipt of packets */
-#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
-#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
-#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
-#define __IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes (for old user code) */
-#define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */
-#define IPATH_CMD_USER_INIT 24 /* set up userspace */
-#define IPATH_CMD_UNUSED_1 25
-#define IPATH_CMD_UNUSED_2 26
-#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
-#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
-#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
-/* 30 is unused */
-#define IPATH_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
-#define IPATH_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
-
-/*
- * Poll types
- */
-#define IPATH_POLL_TYPE_URGENT 0x01
-#define IPATH_POLL_TYPE_OVERFLOW 0x02
-
-struct ipath_port_info {
- __u32 num_active; /* number of active units */
- __u32 unit; /* unit (chip) assigned to caller */
- __u16 port; /* port on unit assigned to caller */
- __u16 subport; /* subport on unit assigned to caller */
- __u16 num_ports; /* number of ports available on unit */
- __u16 num_subports; /* number of subports opened on port */
-};
-
-struct ipath_tid_info {
- __u32 tidcnt;
- /* make structure same size in 32 and 64 bit */
- __u32 tid__unused;
- /* virtual address of first page in transfer */
- __u64 tidvaddr;
- /* pointer (same size 32/64 bit) to __u16 tid array */
- __u64 tidlist;
-
- /*
- * pointer (same size 32/64 bit) to bitmap of TIDs used
- * for this call; checked for being large enough at open
- */
- __u64 tidmap;
-};
-
-struct ipath_cmd {
- __u32 type; /* command type */
- union {
- struct ipath_tid_info tid_info;
- struct ipath_user_info user_info;
-
- /*
- * address in userspace where we should put the sdma
- * inflight counter
- */
- __u64 sdma_inflight;
- /*
- * address in userspace where we should put the sdma
- * completion counter
- */
- __u64 sdma_complete;
- /* address in userspace of struct ipath_port_info to
- write result to */
- __u64 port_info;
- /* enable/disable receipt of packets */
- __u32 recv_ctrl;
- /* enable/disable armlaunch errors (non-zero to enable) */
- __u32 armlaunch_ctrl;
- /* partition key to set */
- __u16 part_key;
- /* user address of __u32 bitmask of active slaves */
- __u64 slave_mask_addr;
- /* type of polling we want */
- __u16 poll_type;
- } cmd;
-};
-
-struct ipath_iovec {
- /* Pointer to data, but same size 32 and 64 bit */
- __u64 iov_base;
-
- /*
- * Length of data; don't need 64 bits, but want
- * ipath_sendpkt to remain same size as before 32 bit changes, so...
- */
- __u64 iov_len;
-};
-
-/*
- * Describes a single packet for send. Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the ipath_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __ipath_sendpkt {
- __u32 sps_flags; /* flags for packet (TBD) */
- __u32 sps_cnt; /* number of entries to use in sps_iov */
- /* array of iov's describing packet. TEMPORARY */
- struct ipath_iovec sps_iov[4];
-};
-
-/*
- * diagnostics can send a packet by "writing" one of the following
- * two structs to diag data special file
- * The first is the legacy version for backward compatibility
- */
-struct ipath_diag_pkt {
- __u32 unit;
- __u64 data;
- __u32 len;
-};
-
-/* The second diag_pkt struct is the expanded version that allows
- * more control over the packet, specifically, by allowing a custom
- * pbc (+ static rate) qword, so that special modes and deliberate
- * changes to CRCs can be used. The elements were also re-ordered
- * for better alignment and to avoid padding issues.
- */
-struct ipath_diag_xpkt {
- __u64 data;
- __u64 pbc_wd;
- __u32 unit;
- __u32 len;
-};
-
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define IPATH_FLASH_VERSION 2
-struct ipath_flash {
- /* flash layout version (IPATH_FLASH_VERSION) */
- __u8 if_fversion;
- /* checksum protecting if_length bytes */
- __u8 if_csum;
- /*
- * valid length (in use, protected by if_csum), including
- * if_fversion and if_csum themselves
- */
- __u8 if_length;
- /* the GUID, in network order */
- __u8 if_guid[8];
- /* number of GUIDs to use, starting from if_guid */
- __u8 if_numguid;
- /* the (last 10 characters of) board serial number, in ASCII */
- char if_serial[12];
- /* board mfg date (YYYYMMDD ASCII) */
- char if_mfgdate[8];
- /* last board rework/test date (YYYYMMDD ASCII) */
- char if_testdate[8];
- /* logging of error counts, TBD */
- __u8 if_errcntp[4];
- /* powered on hours, updated at driver unload */
- __u8 if_powerhour[2];
- /* ASCII free-form comment field */
- char if_comment[32];
- /* Backwards compatible prefix for longer QLogic Serial Numbers */
- char if_sprefix[4];
- /* 82 bytes used, min flash size is 128 bytes */
- __u8 if_future[46];
-};
-
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct infinipath_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
- __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
- __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- /* The following are new for IBA7220 */
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software.
- * The other bits that are used only by the driver or diags are in
- * ipath_registers.h
- */
-
-/* RcvHdrFlags bits */
-#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
-#define INFINIPATH_RHF_LENGTH_SHIFT 0
-#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
-#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
-#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
-#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
-#define INFINIPATH_RHF_SEQ_MASK 0xF
-#define INFINIPATH_RHF_SEQ_SHIFT 0
-#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
-#define INFINIPATH_RHF_H_ICRCERR 0x80000000
-#define INFINIPATH_RHF_H_VCRCERR 0x40000000
-#define INFINIPATH_RHF_H_PARITYERR 0x20000000
-#define INFINIPATH_RHF_H_LENERR 0x10000000
-#define INFINIPATH_RHF_H_MTUERR 0x08000000
-#define INFINIPATH_RHF_H_IHDRERR 0x04000000
-#define INFINIPATH_RHF_H_TIDERR 0x02000000
-#define INFINIPATH_RHF_H_MKERR 0x01000000
-#define INFINIPATH_RHF_H_IBERR 0x00800000
-#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
-#define INFINIPATH_RHF_L_USE_EGR 0x80000000
-#define INFINIPATH_RHF_L_SWA 0x00008000
-#define INFINIPATH_RHF_L_SWB 0x00004000
-
-/* infinipath header fields */
-#define INFINIPATH_I_VERS_MASK 0xF
-#define INFINIPATH_I_VERS_SHIFT 28
-#define INFINIPATH_I_PORT_MASK 0xF
-#define INFINIPATH_I_PORT_SHIFT 24
-#define INFINIPATH_I_TID_MASK 0x7FF
-#define INFINIPATH_I_TID_SHIFT 13
-#define INFINIPATH_I_OFFSET_MASK 0x1FFF
-#define INFINIPATH_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define INFINIPATH_KPF_INTR 0x1
-#define INFINIPATH_KPF_SUBPORT_MASK 0x3
-#define INFINIPATH_KPF_SUBPORT_SHIFT 1
-
-#define INFINIPATH_MAX_SUBPORT 4
-
-/* SendPIO per-buffer control */
-#define INFINIPATH_SP_TEST 0x40
-#define INFINIPATH_SP_TESTEBP 0x20
-#define INFINIPATH_SP_TRIGGER_SHIFT 15
-
-/* SendPIOAvail bits */
-#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
-#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* infinipath header format */
-struct ipath_header {
- /*
- * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
- * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
- * Port 4, TID 11, offset 13.
- */
- __le32 ver_port_tid_offset;
- __le16 chksum;
- __le16 pkt_flags;
-};
-
-/* infinipath user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ infinipath.
- */
-struct ipath_message_header {
- __be16 lrh[4];
- __be32 bth[3];
- /* fields below this point are in host byte order */
- struct ipath_header iph;
- __u8 sub_opcode;
-};
-
-/* infinipath ethernet header format */
-struct ether_header {
- __be16 lrh[4];
- __be32 bth[3];
- struct ipath_header iph;
- __u8 sub_opcode;
- __u8 cmd;
- __be16 lid;
- __u16 mac[3];
- __u8 frag_num;
- __u8 seq_num;
- __le32 len;
- /* MUST be of word size due to PIO write requirements */
- __le32 csum;
- __le16 csum_offset;
- __le16 flags;
- __u16 first_2_bytes;
- __u8 unused[2]; /* currently unused */
-};
-
-
-/* IB - LRH header consts */
-#define IPATH_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
-#define IPATH_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define IPATH_DEFAULT_P_KEY 0xFFFF
-#define IPATH_PERMISSIVE_LID 0xFFFF
-#define IPATH_AETH_CREDIT_SHIFT 24
-#define IPATH_AETH_CREDIT_MASK 0x1F
-#define IPATH_AETH_CREDIT_INVAL 0x1F
-#define IPATH_PSN_MASK 0xFFFFFF
-#define IPATH_MSN_MASK 0xFFFFFF
-#define IPATH_QPN_MASK 0xFFFFFF
-#define IPATH_MULTICAST_LID_BASE 0xC000
-#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
-#define IPATH_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from infinipath) */
-#define RCVHQ_RCV_TYPE_EXPECTED 0
-#define RCVHQ_RCV_TYPE_EAGER 1
-#define RCVHQ_RCV_TYPE_NON_KD 2
-#define RCVHQ_RCV_TYPE_ERROR 3
-
-
-/* sub OpCodes - ith4x */
-#define IPATH_ITH4X_OPCODE_ENCAP 0x81
-#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
-
-#define IPATH_HEADER_QUEUE_WORDS 9
-
-/* functions for extracting fields from rcvhdrq entries for the driver.
- */
-static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
-{
- return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
-}
-
-static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
- & INFINIPATH_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
-{
- return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
- & INFINIPATH_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
- & INFINIPATH_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
- & INFINIPATH_RHF_SEQ_MASK;
-}
-
-static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
- & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
-}
-
-static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
-{
- return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
- & INFINIPATH_I_VERS_MASK;
-}
-
-#endif /* _IPATH_COMMON_H */
diff --git a/drivers/staging/rdma/ipath/ipath_cq.c b/drivers/staging/rdma/ipath/ipath_cq.c
deleted file mode 100644
index e9dd9112e718..000000000000
--- a/drivers/staging/rdma/ipath/ipath_cq.c
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
-{
- struct ipath_cq_wc *wc;
- unsigned long flags;
- u32 head;
- u32 next;
-
- spin_lock_irqsave(&cq->lock, flags);
-
- /*
- * Note that the head pointer might be writable by user processes.
- * Take care to verify it is a sane value.
- */
- wc = cq->queue;
- head = wc->head;
- if (head >= (unsigned) cq->ibcq.cqe) {
- head = cq->ibcq.cqe;
- next = 0;
- } else
- next = head + 1;
- if (unlikely(next == wc->tail)) {
- spin_unlock_irqrestore(&cq->lock, flags);
- if (cq->ibcq.event_handler) {
- struct ib_event ev;
-
- ev.device = cq->ibcq.device;
- ev.element.cq = &cq->ibcq;
- ev.event = IB_EVENT_CQ_ERR;
- cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
- }
- return;
- }
- if (cq->ip) {
- wc->uqueue[head].wr_id = entry->wr_id;
- wc->uqueue[head].status = entry->status;
- wc->uqueue[head].opcode = entry->opcode;
- wc->uqueue[head].vendor_err = entry->vendor_err;
- wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
- wc->uqueue[head].qp_num = entry->qp->qp_num;
- wc->uqueue[head].src_qp = entry->src_qp;
- wc->uqueue[head].wc_flags = entry->wc_flags;
- wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = entry->slid;
- wc->uqueue[head].sl = entry->sl;
- wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
- wc->uqueue[head].port_num = entry->port_num;
- /* Make sure entry is written before the head index. */
- smp_wmb();
- } else
- wc->kqueue[head] = *entry;
- wc->head = next;
-
- if (cq->notify == IB_CQ_NEXT_COMP ||
- (cq->notify == IB_CQ_SOLICITED && solicited)) {
- cq->notify = IB_CQ_NONE;
- cq->triggered++;
- /*
- * This will cause send_complete() to be called in
- * another thread.
- */
- tasklet_hi_schedule(&cq->comptask);
- }
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- if (entry->status != IB_WC_SUCCESS)
- to_idev(cq->ibcq.device)->n_wqe_errs++;
-}
-
-/**
- * ipath_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context. Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct ipath_cq *cq = to_icq(ibcq);
- struct ipath_cq_wc *wc;
- unsigned long flags;
- int npolled;
- u32 tail;
-
- /* The kernel can only poll a kernel completion queue */
- if (cq->ip) {
- npolled = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&cq->lock, flags);
-
- wc = cq->queue;
- tail = wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (tail == wc->head)
- break;
- /* The kernel doesn't need a RMB since it has the lock. */
- *entry = wc->kqueue[tail];
- if (tail >= cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- wc->tail = tail;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
- return npolled;
-}
-
-static void send_complete(unsigned long data)
-{
- struct ipath_cq *cq = (struct ipath_cq *)data;
-
- /*
- * The completion handler will most likely rearm the notification
- * and poll for all pending entries. If a new completion entry
- * is added while we are in this routine, tasklet_hi_schedule()
- * won't call us again until we return so we check triggered to
- * see if we need to call the handler again.
- */
- for (;;) {
- u8 triggered = cq->triggered;
-
- cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
-
- if (cq->triggered == triggered)
- return;
- }
-}
-
-/**
- * ipath_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: unused by the InfiniPath driver
- * @udata: unused by the InfiniPath driver
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_cq *cq;
- struct ipath_cq_wc *wc;
- struct ib_cq *ret;
- u32 sz;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (entries < 1 || entries > ib_ipath_max_cqes) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- /* Allocate the completion queue structure. */
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Allocate the completion queue entries and head/tail pointers.
- * This is allocated separately so that it can be resized and
- * also mapped into user space.
- * We need to use vmalloc() in order to support mmap and large
- * numbers of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
- else
- sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_cq;
- }
-
- /*
- * Return the address of the WC as the offset to mmap.
- * See ipath_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
-
- cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
- if (!cq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wc;
- }
-
- err = ib_copy_to_udata(udata, &cq->ip->offset,
- sizeof(cq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- cq->ip = NULL;
-
- spin_lock(&dev->n_cqs_lock);
- if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
- spin_unlock(&dev->n_cqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_cqs_allocated++;
- spin_unlock(&dev->n_cqs_lock);
-
- if (cq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- /*
- * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
- * The number of entries should be >= the number requested or return
- * an error.
- */
- cq->ibcq.cqe = entries;
- cq->notify = IB_CQ_NONE;
- cq->triggered = 0;
- spin_lock_init(&cq->lock);
- tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
- wc->head = 0;
- wc->tail = 0;
- cq->queue = wc;
-
- ret = &cq->ibcq;
-
- goto done;
-
-bail_ip:
- kfree(cq->ip);
-bail_wc:
- vfree(wc);
-bail_cq:
- kfree(cq);
-done:
- return ret;
-}
-
-/**
- * ipath_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int ipath_destroy_cq(struct ib_cq *ibcq)
-{
- struct ipath_ibdev *dev = to_idev(ibcq->device);
- struct ipath_cq *cq = to_icq(ibcq);
-
- tasklet_kill(&cq->comptask);
- spin_lock(&dev->n_cqs_lock);
- dev->n_cqs_allocated--;
- spin_unlock(&dev->n_cqs_lock);
- if (cq->ip)
- kref_put(&cq->ip->ref, ipath_release_mmap_info);
- else
- vfree(cq->queue);
- kfree(cq);
-
- return 0;
-}
-
-/**
- * ipath_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context. Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
- struct ipath_cq *cq = to_icq(ibcq);
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&cq->lock, flags);
- /*
- * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
- * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
- */
- if (cq->notify != IB_CQ_NEXT_COMP)
- cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- cq->queue->head != cq->queue->tail)
- ret = 1;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- return ret;
-}
-
-/**
- * ipath_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
- *
- * Returns 0 for success.
- */
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
- struct ipath_cq *cq = to_icq(ibcq);
- struct ipath_cq_wc *old_wc;
- struct ipath_cq_wc *wc;
- u32 head, tail, n;
- int ret;
- u32 sz;
-
- if (cqe < 1 || cqe > ib_ipath_max_cqes) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
- else
- sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->outlen >= sizeof(__u64)) {
- __u64 offset = 0;
-
- ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&cq->lock);
- /*
- * Make sure head and tail are sane since they
- * might be user writable.
- */
- old_wc = cq->queue;
- head = old_wc->head;
- if (head > (u32) cq->ibcq.cqe)
- head = (u32) cq->ibcq.cqe;
- tail = old_wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- if (head < tail)
- n = cq->ibcq.cqe + 1 + head - tail;
- else
- n = head - tail;
- if (unlikely((u32)cqe < n)) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- for (n = 0; tail != head; n++) {
- if (cq->ip)
- wc->uqueue[n] = old_wc->uqueue[tail];
- else
- wc->kqueue[n] = old_wc->kqueue[tail];
- if (tail == (u32) cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- cq->ibcq.cqe = cqe;
- wc->head = n;
- wc->tail = 0;
- cq->queue = wc;
- spin_unlock_irq(&cq->lock);
-
- vfree(old_wc);
-
- if (cq->ip) {
- struct ipath_ibdev *dev = to_idev(ibcq->device);
- struct ipath_mmap_info *ip = cq->ip;
-
- ipath_update_mmap_info(dev, ip, sz, wc);
-
- /*
- * Return the offset to mmap.
- * See ipath_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = 0;
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&cq->lock);
-bail_free:
- vfree(wc);
-bail:
- return ret;
-}
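
As an aside for readers of the resize path above: the ring keeps cqe + 1 slots so that head == tail can mean "empty", the user-writable head/tail indices are clamped before use, and the occupancy count wraps when head is behind tail. A minimal userspace sketch of that arithmetic (the function name and values are invented for illustration, not driver code):

#include <stdio.h>

/* Clamp possibly user-corrupted indices, then count outstanding
 * entries in a ring of cqe + 1 slots, as ipath_resize_cq() does. */
static unsigned occupancy(unsigned head, unsigned tail, unsigned cqe)
{
	if (head > cqe)
		head = cqe;
	if (tail > cqe)
		tail = cqe;

	return head < tail ? cqe + 1 + head - tail : head - tail;
}

int main(void)
{
	unsigned cqe = 7;			/* 8 slots, 7 usable entries */

	printf("%u\n", occupancy(5, 2, cqe));	/* 3 outstanding completions */
	printf("%u\n", occupancy(1, 6, cqe));	/* wrapped around: also 3 */
	printf("%u\n", occupancy(9, 0, cqe));	/* bogus head clamped to 7 */
	return 0;
}
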
diff --git a/drivers/staging/rdma/ipath/ipath_debug.h b/drivers/staging/rdma/ipath/ipath_debug.h
deleted file mode 100644
index 65926cd35759..000000000000
--- a/drivers/staging/rdma/ipath/ipath_debug.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_DEBUG_H
-#define _IPATH_DEBUG_H
-
-#ifndef _IPATH_DEBUGGING /* debugging enabled or not */
-#define _IPATH_DEBUGGING 1
-#endif
-
-#if _IPATH_DEBUGGING
-
-/*
- * Mask values for debugging. The scheme allows us to compile out any
- * of the debug tracing stuff, and if compiled in, to enable or disable
- * dynamically. This can be set at modprobe time also:
- * modprobe infinipath.ko infinipath_debug=7
- */
-
-#define __IPATH_INFO 0x1 /* generic low verbosity stuff */
-#define __IPATH_DBG 0x2 /* generic debug */
-#define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */
-/* leave some low verbosity spots open */
-#define __IPATH_VERBDBG 0x40 /* very verbose debug */
-#define __IPATH_PKTDBG 0x80 /* print packet data */
-/* print process startup (init)/exit messages */
-#define __IPATH_PROCDBG 0x100
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG 0x200
-#define __IPATH_ERRPKTDBG 0x400
-#define __IPATH_USER_SEND 0x1000 /* use user mode send */
-#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
-#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
-#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
-#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
-#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
-#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
-#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
-#define __IPATH_LINKVERBDBG 0x200000 /* very verbose linkchange debug */
-
-#else /* _IPATH_DEBUGGING */
-
-/*
- * define all of these even with debugging off, for the few places that do
- * if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
- * compiler eliminate the code
- */
-
-#define __IPATH_INFO 0x0 /* generic low verbosity stuff */
-#define __IPATH_DBG 0x0 /* generic debug */
-#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
-#define __IPATH_VERBDBG 0x0 /* very verbose debug */
-#define __IPATH_PKTDBG 0x0 /* print packet data */
-#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG 0x0
-#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
-#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
-#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
-#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
-#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
-#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on */
-#define __IPATH_LINKVERBDBG 0x0 /* very verbose linkchange debug */
-
-#endif /* _IPATH_DEBUGGING */
-
-#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
-
-#endif /* _IPATH_DEBUG_H */
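
The mask scheme in the header above compiles debug tracing out by turning every mask into 0 when _IPATH_DEBUGGING is unset, so the if (infinipath_debug & ...) tests become constant-false. A small standalone sketch of the same pattern (macro and variable names are made up for the example; the variadic macro uses the common GCC/Clang ##__VA_ARGS__ extension):

#include <stdio.h>

#define MYDRV_DEBUGGING 1	/* set to 0 to compile the prints out */

#if MYDRV_DEBUGGING
#define MYDRV_DBG 0x2
#else
#define MYDRV_DBG 0x0		/* mask is 0: the test below is always false */
#endif

static unsigned mydrv_debug = MYDRV_DBG;	/* would be a module parameter */

/* with MYDRV_DBG == 0 the compiler can eliminate the printf call entirely */
#define mydrv_dbg(fmt, ...)					\
	do {							\
		if (mydrv_debug & MYDRV_DBG)			\
			printf(fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	mydrv_dbg("unit %d initialized\n", 0);
	return 0;
}
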
diff --git a/drivers/staging/rdma/ipath/ipath_diag.c b/drivers/staging/rdma/ipath/ipath_diag.c
deleted file mode 100644
index 45802e97332e..000000000000
--- a/drivers/staging/rdma/ipath/ipath_diag.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the ipath_diag device, normally minor number 129. Diagnostic use
- * of the InfiniPath chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/export.h>
-#include <asm/uaccess.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-int ipath_diag_inuse;
-static int diag_set_link;
-
-static int ipath_diag_open(struct inode *in, struct file *fp);
-static int ipath_diag_release(struct inode *in, struct file *fp);
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off);
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
- .owner = THIS_MODULE,
- .write = ipath_diag_write,
- .read = ipath_diag_read,
- .open = ipath_diag_open,
- .release = ipath_diag_release,
- .llseek = default_llseek,
-};
-
-static ssize_t ipath_diagpkt_write(struct file *fp,
- const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = ipath_diagpkt_write,
- .llseek = noop_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_dev;
-
-int ipath_diag_add(struct ipath_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
- "ipath_diagpkt", &diagpkt_file_ops,
- &diagpkt_cdev, &diagpkt_dev);
-
- if (ret) {
- ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
- "device: %d", ret);
- goto done;
- }
- }
-
- snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
-
- ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
- &diag_file_ops, &dd->diag_cdev,
- &dd->diag_dev);
- if (ret)
- ipath_dev_err(dd, "Couldn't create %s device: %d",
- name, ret);
-
-done:
- return ret;
-}
-
-void ipath_diag_remove(struct ipath_devdata *dd)
-{
- if (atomic_dec_and_test(&diagpkt_count))
- ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
-
- ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
-}
-
-/**
- * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip. This is usually used for a single qword
- *
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
- const void __iomem *caddr, size_t count)
-{
- const u64 __iomem *reg_addr = caddr;
- const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
- int ret;
-
- /* not very efficient, but it works for now */
- if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
- ret = -EINVAL;
- goto bail;
- }
- while (reg_addr < reg_end) {
- u64 data = readq(reg_addr);
- if (copy_to_user(uaddr, &data, sizeof(u64))) {
- ret = -EFAULT;
- goto bail;
- }
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-
-static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
- const void __user *uaddr, size_t count)
-{
- u64 __iomem *reg_addr = caddr;
- const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
- int ret;
-
- /* not very efficient, but it works for now */
- if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
- ret = -EINVAL;
- goto bail;
- }
- while (reg_addr < reg_end) {
- u64 data;
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writeq(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
- const void __iomem *caddr, size_t count)
-{
- const u32 __iomem *reg_addr = caddr;
- const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
- int ret;
-
- if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
- reg_end > (u32 __iomem *) dd->ipath_kregend) {
- ret = -EINVAL;
- goto bail;
- }
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u32 data = readl(reg_addr);
- if (copy_to_user(uaddr, &data, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
-
- reg_addr++;
- uaddr += sizeof(u32);
-
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit write; usually a single dword.
- */
-
-static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
- const void __user *uaddr, size_t count)
-{
- u32 __iomem *reg_addr = caddr;
- const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
- int ret;
-
- if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
- reg_end > (u32 __iomem *) dd->ipath_kregend) {
- ret = -EINVAL;
- goto bail;
- }
- while (reg_addr < reg_end) {
- u32 data;
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writel(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u32);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-static int ipath_diag_open(struct inode *in, struct file *fp)
-{
- int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
- struct ipath_devdata *dd;
- int ret;
-
- mutex_lock(&ipath_mutex);
-
- if (ipath_diag_inuse) {
- ret = -EBUSY;
- goto bail;
- }
-
- dd = ipath_lookup(unit);
-
- if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
- !dd->ipath_kregbase) {
- ret = -ENODEV;
- goto bail;
- }
-
- fp->private_data = dd;
- ipath_diag_inuse = -2;
- diag_set_link = 0;
- ret = 0;
-
- /* Only expose a way to reset the device if we
- make it into diag mode. */
- ipath_expose_reset(&dd->pcidev->dev);
-
-bail:
- mutex_unlock(&ipath_mutex);
-
- return ret;
-}
-
-/**
- * ipath_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: ipath_diag_pkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t ipath_diagpkt_write(struct file *fp,
- const char __user *data,
- size_t count, loff_t *off)
-{
- u32 __iomem *piobuf;
- u32 plen, pbufn, maxlen_reserve;
- struct ipath_diag_pkt odp;
- struct ipath_diag_xpkt dp;
- u32 *tmpbuf = NULL;
- struct ipath_devdata *dd;
- ssize_t ret = 0;
- u64 val;
- u32 l_state, lt_state; /* LinkState, LinkTrainingState */
-
-
- if (count == sizeof(dp)) {
- if (copy_from_user(&dp, data, sizeof(dp))) {
- ret = -EFAULT;
- goto bail;
- }
- } else if (count == sizeof(odp)) {
- if (copy_from_user(&odp, data, sizeof(odp))) {
- ret = -EFAULT;
- goto bail;
- }
- dp.len = odp.len;
- dp.unit = odp.unit;
- dp.data = odp.data;
- dp.pbc_wd = 0;
- } else {
- ret = -EINVAL;
- goto bail;
- }
-
- /* send count must be an exact number of dwords */
- if (dp.len & 3) {
- ret = -EINVAL;
- goto bail;
- }
-
- plen = dp.len >> 2;
-
- dd = ipath_lookup(dp.unit);
- if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
- !dd->ipath_kregbase) {
- ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
- dp.unit);
- ret = -ENODEV;
- goto bail;
- }
-
- if (ipath_diag_inuse && !diag_set_link &&
- !(dd->ipath_flags & IPATH_LINKACTIVE)) {
- diag_set_link = 1;
- ipath_cdbg(VERBOSE, "Trying to set to set link active for "
- "diag pkt\n");
- ipath_set_linkstate(dd, IPATH_IB_LINKARM);
- ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
- }
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* no hardware, freeze, etc. */
- ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
- ret = -ENODEV;
- goto bail;
- }
- /*
- * Skip the check for l_state if using a custom PBC, because we
- * might be trying to force an SM packet out. As a first cut, skip
- * _all_ state checking in that case.
- */
- val = ipath_ib_state(dd, dd->ipath_lastibcstat);
- lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
- l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
- if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
- (val != dd->ib_init && val != dd->ib_arm &&
- val != dd->ib_active))) {
- ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
- dd->ipath_unit, (unsigned long long) val);
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * need total length before first word written, plus 2 Dwords. One Dword
- * is for padding so we get the full user data when not aligned on
- * a word boundary. The other Dword is to make sure we have room for the
- * ICRC which gets tacked on later.
- */
- maxlen_reserve = 2 * sizeof(u32);
- if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
- ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
- dp.len, dd->ipath_ibmaxlen);
- ret = -EINVAL;
- goto bail;
- }
-
- plen = sizeof(u32) + dp.len;
-
- tmpbuf = vmalloc(plen);
- if (!tmpbuf) {
- dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
- "failing\n");
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- (const void __user *) (unsigned long) dp.data,
- dp.len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- plen >>= 2; /* in dwords */
-
- piobuf = ipath_getpiobuf(dd, plen, &pbufn);
- if (!piobuf) {
- ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
- dd->ipath_unit);
- ret = -EBUSY;
- goto bail;
- }
- /* disarm it just to be extra sure */
- ipath_disarm_piobufs(dd, pbufn, 1);
-
- if (ipath_debug & __IPATH_PKTDBG)
- ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
- dd->ipath_unit, plen - 1, pbufn);
-
- if (dp.pbc_wd == 0)
- dp.pbc_wd = plen;
- writeq(dp.pbc_wd, piobuf);
- /*
- * Copy all but the trigger word, then flush, so it's written
- * to the chip before the trigger word, then write the trigger word,
- * then flush again, so the packet is sent.
- */
- if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
- ipath_flush_wc();
- __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
- ipath_flush_wc();
- __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
- } else
- __iowrite32_copy(piobuf + 2, tmpbuf, plen);
-
- ipath_flush_wc();
-
- ret = sizeof(dp);
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
-
-static int ipath_diag_release(struct inode *in, struct file *fp)
-{
- mutex_lock(&ipath_mutex);
- ipath_diag_inuse = 0;
- fp->private_data = NULL;
- mutex_unlock(&ipath_mutex);
- return 0;
-}
-
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off)
-{
- struct ipath_devdata *dd = fp->private_data;
- void __iomem *kreg_base;
- ssize_t ret;
-
- kreg_base = dd->ipath_kregbase;
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (ipath_diag_inuse < 1 && (*off || count != 8))
- ret = -EINVAL; /* prevent cat /dev/ipath_diag* */
- else if ((count % 8) || (*off % 8))
- /* address or length not 64-bit aligned; do 32-bit reads */
- ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
- else
- ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (ipath_diag_inuse == -2)
- ipath_diag_inuse++;
- }
-
- return ret;
-}
-
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct ipath_devdata *dd = fp->private_data;
- void __iomem *kreg_base;
- ssize_t ret;
-
- kreg_base = dd->ipath_kregbase;
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
- ipath_diag_inuse == -2) /* read qw off 0, write qw off 0 */
- ret = -EINVAL; /* before any other write allowed */
- else if ((count % 8) || (*off % 8))
- /* address or length not 64-bit aligned; do 32-bit writes */
- ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
- else
- ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (ipath_diag_inuse == -1)
- ipath_diag_inuse = 1; /* all read/write OK now */
- }
-
- return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_dma.c b/drivers/staging/rdma/ipath/ipath_dma.c
deleted file mode 100644
index 123a8c053539..000000000000
--- a/drivers/staging/rdma/ipath/ipath_dma.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/scatterlist.h>
-#include <linux/gfp.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64) 0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 ipath_dma_map_single(struct ib_device *dev,
- void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- return (u64) cpu_addr;
-}
-
-static void ipath_dma_unmap_single(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 ipath_dma_map_page(struct ib_device *dev,
- struct page *page,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- u64 addr;
-
- BUG_ON(!valid_dma_direction(direction));
-
- if (offset + size > PAGE_SIZE) {
- addr = BAD_DMA_ADDRESS;
- goto done;
- }
-
- addr = (u64) page_address(page);
- if (addr)
- addr += offset;
- /* TODO: handle highmem pages */
-
-done:
- return addr;
-}
-
-static void ipath_dma_unmap_page(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- u64 addr;
- int i;
- int ret = nents;
-
- BUG_ON(!valid_dma_direction(direction));
-
- for_each_sg(sgl, sg, nents, i) {
- addr = (u64) page_address(sg_page(sg));
- /* TODO: handle highmem pages */
- if (!addr) {
- ret = 0;
- break;
- }
- sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- }
- return ret;
-}
-
-static void ipath_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static void ipath_sync_single_for_cpu(struct ib_device *dev,
- u64 addr,
- size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void ipath_sync_single_for_device(struct ib_device *dev,
- u64 addr,
- size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
- u64 *dma_handle, gfp_t flag)
-{
- struct page *p;
- void *addr = NULL;
-
- p = alloc_pages(flag, get_order(size));
- if (p)
- addr = page_address(p);
- if (dma_handle)
- *dma_handle = (u64) addr;
- return addr;
-}
-
-static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
- void *cpu_addr, u64 dma_handle)
-{
- free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
- .mapping_error = ipath_mapping_error,
- .map_single = ipath_dma_map_single,
- .unmap_single = ipath_dma_unmap_single,
- .map_page = ipath_dma_map_page,
- .unmap_page = ipath_dma_unmap_page,
- .map_sg = ipath_map_sg,
- .unmap_sg = ipath_unmap_sg,
- .sync_single_for_cpu = ipath_sync_single_for_cpu,
- .sync_single_for_device = ipath_sync_single_for_device,
- .alloc_coherent = ipath_dma_alloc_coherent,
- .free_coherent = ipath_dma_free_coherent
-};
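
Since this driver copies data with the CPU, its "DMA" ops simply hand back kernel virtual addresses and treat 0 as the bad address. A hedged userspace stand-in for that identity-mapping idea, using an ops table of function pointers like the one above (none of these names are from the driver):

#include <stdint.h>
#include <stdio.h>

struct fake_dma_ops {
	uint64_t (*map_single)(void *cpu_addr, size_t size);
	int	 (*mapping_error)(uint64_t dma_addr);
};

static uint64_t fake_map_single(void *cpu_addr, size_t size)
{
	(void)size;
	return (uint64_t)(uintptr_t)cpu_addr;	/* identity "mapping" */
}

static int fake_mapping_error(uint64_t dma_addr)
{
	return dma_addr == 0;			/* analogous to BAD_DMA_ADDRESS */
}

static const struct fake_dma_ops ops = {
	.map_single	= fake_map_single,
	.mapping_error	= fake_mapping_error,
};

int main(void)
{
	int payload = 42;
	uint64_t handle = ops.map_single(&payload, sizeof(payload));

	printf("mapped=%#llx error=%d\n",
	       (unsigned long long)handle, ops.mapping_error(handle));
	return 0;
}
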
diff --git a/drivers/staging/rdma/ipath/ipath_driver.c b/drivers/staging/rdma/ipath/ipath_driver.c
deleted file mode 100644
index 2ab22f98e3ba..000000000000
--- a/drivers/staging/rdma/ipath/ipath_driver.c
+++ /dev/null
@@ -1,2784 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#ifdef CONFIG_X86_64
-#include <asm/pat.h>
-#endif
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-
-static void ipath_update_pio_bufs(struct ipath_devdata *);
-
-const char *ipath_get_unit_name(int unit)
-{
- static char iname[16];
- snprintf(iname, sizeof iname, "infinipath%u", unit);
- return iname;
-}
-
-#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
-#define PFX IPATH_DRV_NAME ": "
-
-/*
- * The size has to be longer than this string, so we can append
- * board/chip information to it in the init code.
- */
-const char ib_ipath_version[] = IPATH_IDSTR "\n";
-
-static struct idr unit_table;
-DEFINE_SPINLOCK(ipath_devs_lock);
-LIST_HEAD(ipath_dev_list);
-
-wait_queue_head_t ipath_state_wait;
-
-unsigned ipath_debug = __IPATH_INFO;
-
-module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "mask for debug prints");
-EXPORT_SYMBOL_GPL(ipath_debug);
-
-unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
-module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
-MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
-
-static unsigned ipath_hol_timeout_ms = 13000;
-module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
- "duration of user app suspension after link failure");
-
-unsigned ipath_linkrecovery = 1;
-module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
-
-/*
- * Table to translate the LINKTRAININGSTATE portion of
- * IBCStatus to a human-readable form.
- */
-const char *ipath_ibcstatus_str[] = {
- "Disabled",
- "LinkUp",
- "PollActive",
- "PollQuiet",
- "SleepDelay",
- "SleepQuiet",
- "LState6", /* unused */
- "LState7", /* unused */
- "CfgDebounce",
- "CfgRcvfCfg",
- "CfgWaitRmt",
- "CfgIdle",
- "RecovRetrain",
- "CfgTxRevLane", /* unused before IBA7220 */
- "RecovWaitRmt",
- "RecovIdle",
- /* below were added for IBA7220 */
- "CfgEnhanced",
- "CfgTest",
- "CfgWaitRmtTest",
- "CfgWaitCfgEnhanced",
- "SendTS_T",
- "SendTstIdles",
- "RcvTS_T",
- "SendTst_TS1s",
- "LTState18", "LTState19", "LTState1A", "LTState1B",
- "LTState1C", "LTState1D", "LTState1E", "LTState1F"
-};
-
-static void ipath_remove_one(struct pci_dev *);
-static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
-
-/* Only needed for registration, nothing else needs this info */
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-
-/* Number of seconds before our card status check... */
-#define STATUS_TIMEOUT 60
-
-static const struct pci_device_id ipath_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
-
-static struct pci_driver ipath_driver = {
- .name = IPATH_DRV_NAME,
- .probe = ipath_init_one,
- .remove = ipath_remove_one,
- .id_table = ipath_pci_tbl,
- .driver = {
- .groups = ipath_driver_attr_groups,
- },
-};
-
-static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
- u32 *bar0, u32 *bar1)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
- if (ret)
- ipath_dev_err(dd, "failed to read bar0 before enable: "
- "error %d\n", -ret);
-
- ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
- if (ret)
- ipath_dev_err(dd, "failed to read bar1 before enable: "
- "error %d\n", -ret);
-
- ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
-}
-
-static void ipath_free_devdata(struct pci_dev *pdev,
- struct ipath_devdata *dd)
-{
- unsigned long flags;
-
- pci_set_drvdata(pdev, NULL);
-
- if (dd->ipath_unit != -1) {
- spin_lock_irqsave(&ipath_devs_lock, flags);
- idr_remove(&unit_table, dd->ipath_unit);
- list_del(&dd->ipath_list);
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- }
- vfree(dd);
-}
-
-static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
-{
- unsigned long flags;
- struct ipath_devdata *dd;
- int ret;
-
- dd = vzalloc(sizeof(*dd));
- if (!dd) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
- dd->ipath_unit = -1;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
- if (ret < 0) {
- printk(KERN_ERR IPATH_DRV_NAME
- ": Could not allocate unit ID: error %d\n", -ret);
- ipath_free_devdata(pdev, dd);
- dd = ERR_PTR(ret);
- goto bail_unlock;
- }
- dd->ipath_unit = ret;
-
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
- list_add(&dd->ipath_list, &ipath_dev_list);
-
-bail_unlock:
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- idr_preload_end();
-bail:
- return dd;
-}
-
-static inline struct ipath_devdata *__ipath_lookup(int unit)
-{
- return idr_find(&unit_table, unit);
-}
-
-struct ipath_devdata *ipath_lookup(int unit)
-{
- struct ipath_devdata *dd;
- unsigned long flags;
-
- spin_lock_irqsave(&ipath_devs_lock, flags);
- dd = __ipath_lookup(unit);
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
- return dd;
-}
-
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
-{
- int nunits, npresent, nup;
- struct ipath_devdata *dd;
- unsigned long flags;
- int maxports;
-
- nunits = npresent = nup = maxports = 0;
-
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
- nunits++;
- if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
- npresent++;
- if (dd->ipath_lid &&
- !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
- | IPATH_LINKUNK)))
- nup++;
- if (dd->ipath_cfgports > maxports)
- maxports = dd->ipath_cfgports;
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
- if (npresentp)
- *npresentp = npresent;
- if (nupp)
- *nupp = nup;
- if (maxportsp)
- *maxportsp = maxports;
-
- return nunits;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining. If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
-{
- return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
-{
-}
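
The two placeholders above rely on weak linkage: an architecture-specific file can provide a strong definition that silently replaces the default at link time. A tiny standalone illustration of that GCC/Clang mechanism (the function name is invented for the example):

#include <stdio.h>

/* default (weak) implementation, like the ipath_enable_wc() placeholder */
int __attribute__((weak)) enable_wc(void)
{
	return -1;	/* "not supported" unless overridden */
}

/*
 * Another translation unit could define a strong version, e.g.
 *     int enable_wc(void) { ...arch-specific setup... return 0; }
 * and the linker would pick it over the weak default.
 */

int main(void)
{
	printf("enable_wc() = %d\n", enable_wc());
	return 0;
}
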
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration. Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire). On chips that use an address-based
- * trigger to send packets to the wire, this is easy. On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void ipath_verify_pioperf(struct ipath_devdata *dd)
-{
- u32 pbnum, cnt, lcnt;
- u32 __iomem *piobuf;
- u32 *addr;
- u64 msecs, emsecs;
-
- piobuf = ipath_getpiobuf(dd, 0, &pbnum);
- if (!piobuf) {
- dev_info(&dd->pcidev->dev,
- "No PIObufs for checking perf, skipping\n");
- return;
- }
-
- /*
- * Enough to give us a reasonable test, less than piobuf size, and
- * likely multiple of store buffer length.
- */
- cnt = 1024;
-
- addr = vmalloc(cnt);
- if (!addr) {
- dev_info(&dd->pcidev->dev,
- "Couldn't get memory for checking PIO perf,"
- " skipping\n");
- goto done;
- }
-
- preempt_disable(); /* we want reasonably accurate elapsed time */
- msecs = 1 + jiffies_to_msecs(jiffies);
- for (lcnt = 0; lcnt < 10000U; lcnt++) {
- /* wait until we cross msec boundary */
- if (jiffies_to_msecs(jiffies) >= msecs)
- break;
- udelay(1);
- }
-
- ipath_disable_armlaunch(dd);
-
- /*
- * length 0, no dwords actually sent, and mark as VL15
- * on chips where that may matter (due to IB flowcontrol)
- */
- if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
- writeq(1UL << 63, piobuf);
- else
- writeq(0, piobuf);
- ipath_flush_wc();
-
- /*
- * this is only roughly accurate, since even with preempt we
- * still take interrupts that could take a while. Running for
- * >= 5 msec seems to get us "close enough" to accurate values
- */
- msecs = jiffies_to_msecs(jiffies);
- for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
- __iowrite32_copy(piobuf + 64, addr, cnt >> 2);
- emsecs = jiffies_to_msecs(jiffies) - msecs;
- }
-
- /* 1 GiB/sec, slightly over IB SDR line rate */
- if (lcnt < (emsecs * 1024U))
- ipath_dev_err(dd,
- "Performance problem: bandwidth to PIO buffers is "
- "only %u MiB/sec\n",
- lcnt / (u32) emsecs);
- else
- ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
- lcnt / (u32) emsecs);
-
- preempt_enable();
-
- vfree(addr);
-
-done:
- /* disarm piobuf, so it's available again */
- ipath_disarm_piobufs(dd, pbnum, 1);
- ipath_enable_armlaunch(dd);
-}
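
The pass/fail arithmetic in the test above is easy to miss: each loop iteration copies 1024 bytes, so lcnt / emsecs approximates MiB per second, and the lcnt < emsecs * 1024 check flags anything below roughly 1 GiB/sec. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical measurement: 1 KiB copied per loop iteration */
	unsigned lcnt = 12000;		/* iterations completed */
	unsigned emsecs = 6;		/* elapsed milliseconds */
	unsigned mibs = lcnt / emsecs;	/* 1 KiB per ms is roughly 1 MiB per sec */

	printf("%u MiB/sec (%s)\n", mibs,
	       lcnt < emsecs * 1024U ? "below ~1 GiB/s threshold" : "OK");
	return 0;
}
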
-
-static void cleanup_device(struct ipath_devdata *dd);
-
-static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret, len, j;
- struct ipath_devdata *dd;
- unsigned long long addr;
- u32 bar0 = 0, bar1 = 0;
-
-#ifdef CONFIG_X86_64
- if (pat_enabled()) {
- pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
- ret = -ENODEV;
- goto bail;
- }
-#endif
-
- dd = ipath_alloc_devdata(pdev);
- if (IS_ERR(dd)) {
- ret = PTR_ERR(dd);
- printk(KERN_ERR IPATH_DRV_NAME
- ": Could not allocate devdata: error %d\n", -ret);
- goto bail;
- }
-
- ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- /* This can happen iff:
- *
- * We did a chip reset, and then failed to reprogram the
- * BAR, or the chip reset due to an internal error. We then
- * unloaded the driver and reloaded it.
- *
- * Both reset cases set the BAR back to initial state. For
- * the latter case, the AER sticky error bit at offset 0x718
- * should be set, but the Linux kernel doesn't yet know
- * about that, it appears. If the original BAR was retained
- * in the kernel data structures, this may be OK.
- */
- ipath_dev_err(dd, "enable unit %d failed: error %d\n",
- dd->ipath_unit, -ret);
- goto bail_devdata;
- }
- addr = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
- ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
- "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
- ent->device, ent->driver_data);
-
- read_bars(dd, pdev, &bar0, &bar1);
-
- if (!bar1 && !(bar0 & ~0xf)) {
- if (addr) {
- dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
- "rewriting as %llx\n", addr);
- ret = pci_write_config_dword(
- pdev, PCI_BASE_ADDRESS_0, addr);
- if (ret) {
- ipath_dev_err(dd, "rewrite of BAR0 "
- "failed: err %d\n", -ret);
- goto bail_disable;
- }
- ret = pci_write_config_dword(
- pdev, PCI_BASE_ADDRESS_1, addr >> 32);
- if (ret) {
- ipath_dev_err(dd, "rewrite of BAR1 "
- "failed: err %d\n", -ret);
- goto bail_disable;
- }
- } else {
- ipath_dev_err(dd, "BAR is 0 (probable RESET), "
- "not usable until reboot\n");
- ret = -ENODEV;
- goto bail_disable;
- }
- }
-
- ret = pci_request_regions(pdev, IPATH_DRV_NAME);
- if (ret) {
- dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
- "err %d\n", dd->ipath_unit, -ret);
- goto bail_disable;
- }
-
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- /*
- * if the 64 bit setup fails, try 32 bit. Some systems
- * do not setup 64 bit maps on systems with 2GB or less
- * memory installed.
- */
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- dev_info(&pdev->dev,
- "Unable to set DMA mask for unit %u: %d\n",
- dd->ipath_unit, ret);
- goto bail_regions;
- } else {
- ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- dev_info(&pdev->dev,
- "Unable to set DMA consistent mask "
- "for unit %u: %d\n",
- dd->ipath_unit, ret);
-
- }
- } else {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret)
- dev_info(&pdev->dev,
- "Unable to set DMA consistent mask "
- "for unit %u: %d\n",
- dd->ipath_unit, ret);
- }
-
- pci_set_master(pdev);
-
- /*
- * Save BARs to rewrite after device reset. Save all 64 bits of
- * BAR, just in case.
- */
- dd->ipath_pcibar0 = addr;
- dd->ipath_pcibar1 = addr >> 32;
- dd->ipath_deviceid = ent->device; /* save for later use */
- dd->ipath_vendorid = ent->vendor;
-
- /* setup the chip-specific functions, as early as possible. */
- switch (ent->device) {
- case PCI_DEVICE_ID_INFINIPATH_HT:
- ipath_init_iba6110_funcs(dd);
- break;
-
- default:
- ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
- "failing\n", ent->device);
- return -ENODEV;
- }
-
- for (j = 0; j < 6; j++) {
- if (!pdev->resource[j].start)
- continue;
- ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
- j, &pdev->resource[j],
- (unsigned long long)pci_resource_len(pdev, j));
- }
-
- if (!addr) {
- ipath_dev_err(dd, "No valid address in BAR 0!\n");
- ret = -ENODEV;
- goto bail_regions;
- }
-
- dd->ipath_pcirev = pdev->revision;
-
-#if defined(__powerpc__)
- /* There isn't a generic way to specify writethrough mappings */
- dd->ipath_kregbase = __ioremap(addr, len,
- (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
-#else
- /* XXX: split this properly to enable on PAT */
- dd->ipath_kregbase = ioremap_nocache(addr, len);
-#endif
-
- if (!dd->ipath_kregbase) {
- ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
- addr);
- ret = -ENOMEM;
- goto bail_iounmap;
- }
- dd->ipath_kregend = (u64 __iomem *)
- ((void __iomem *)dd->ipath_kregbase + len);
- dd->ipath_physaddr = addr; /* used for io_remap, etc. */
- /* for user mmap */
- ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
- addr, dd->ipath_kregbase);
-
- if (dd->ipath_f_bus(dd, pdev))
- ipath_dev_err(dd, "Failed to setup config space; "
- "continuing anyway\n");
-
- /*
- * set up our interrupt handler; IRQF_SHARED probably isn't needed,
- * since MSI interrupts shouldn't be shared, but it won't hurt for now.
- * Check for irq 0 after we return from chip-specific bus setup, since
- * that setup can affect the irq.
- */
- if (!dd->ipath_irq)
- ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
- "work\n");
- else {
- ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
- IPATH_DRV_NAME, dd);
- if (ret) {
- ipath_dev_err(dd, "Couldn't setup irq handler, "
- "irq=%d: %d\n", dd->ipath_irq, ret);
- goto bail_iounmap;
- }
- }
-
- ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
- if (ret)
- goto bail_irqsetup;
-
- ret = ipath_enable_wc(dd);
-
- if (ret)
- ret = 0;
-
- ipath_verify_pioperf(dd);
-
- ipath_device_create_group(&pdev->dev, dd);
- ipathfs_add_device(dd);
- ipath_user_add(dd);
- ipath_diag_add(dd);
- ipath_register_ib_device(dd);
-
- goto bail;
-
-bail_irqsetup:
- cleanup_device(dd);
-
- if (dd->ipath_irq)
- dd->ipath_f_free_irq(dd);
-
- if (dd->ipath_f_cleanup)
- dd->ipath_f_cleanup(dd);
-
-bail_iounmap:
- iounmap((volatile void __iomem *) dd->ipath_kregbase);
-
-bail_regions:
- pci_release_regions(pdev);
-
-bail_disable:
- pci_disable_device(pdev);
-
-bail_devdata:
- ipath_free_devdata(pdev, dd);
-
-bail:
- return ret;
-}
-
-static void cleanup_device(struct ipath_devdata *dd)
-{
- int port;
- struct ipath_portdata **tmp;
- unsigned long flags;
-
- if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
- /* can't do anything more with chip; needs re-init */
- *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
- if (dd->ipath_kregbase) {
- /*
- * if we haven't already cleaned up before, clear these so
- * that any register reads/writes "fail" until re-init
- */
- dd->ipath_kregbase = NULL;
- dd->ipath_uregbase = 0;
- dd->ipath_sregbase = 0;
- dd->ipath_cregbase = 0;
- dd->ipath_kregsize = 0;
- }
- ipath_disable_wc(dd);
- }
-
- if (dd->ipath_spectriggerhit)
- dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
- dd->ipath_spectriggerhit);
-
- if (dd->ipath_pioavailregs_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *) dd->ipath_pioavailregs_dma,
- dd->ipath_pioavailregs_phys);
- dd->ipath_pioavailregs_dma = NULL;
- }
- if (dd->ipath_dummy_hdrq) {
- dma_free_coherent(&dd->pcidev->dev,
- dd->ipath_pd[0]->port_rcvhdrq_size,
- dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
- dd->ipath_dummy_hdrq = NULL;
- }
-
- if (dd->ipath_pageshadow) {
- struct page **tmpp = dd->ipath_pageshadow;
- dma_addr_t *tmpd = dd->ipath_physshadow;
- int i, cnt = 0;
-
- ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
- "locked\n");
- for (port = 0; port < dd->ipath_cfgports; port++) {
- int port_tidbase = port * dd->ipath_rcvtidcnt;
- int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
- for (i = port_tidbase; i < maxtid; i++) {
- if (!tmpp[i])
- continue;
- pci_unmap_page(dd->pcidev, tmpd[i],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- ipath_release_user_pages(&tmpp[i], 1);
- tmpp[i] = NULL;
- cnt++;
- }
- }
- if (cnt) {
- ipath_stats.sps_pageunlocks += cnt;
- ipath_cdbg(VERBOSE, "There were still %u expTID "
- "entries locked\n", cnt);
- }
- if (ipath_stats.sps_pagelocks ||
- ipath_stats.sps_pageunlocks)
- ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
- "unlocked via ipath_m{un}lock\n",
- (unsigned long long)
- ipath_stats.sps_pagelocks,
- (unsigned long long)
- ipath_stats.sps_pageunlocks);
-
- ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
- dd->ipath_pageshadow);
- tmpp = dd->ipath_pageshadow;
- dd->ipath_pageshadow = NULL;
- vfree(tmpp);
-
- dd->ipath_egrtidbase = NULL;
- }
-
- /*
- * free any resources still in use (usually just kernel ports)
- * at unload; we do this for portcnt, because that's what we allocate.
- * We acquire the lock to be really paranoid that ipath_pd isn't being
- * accessed from some interrupt-related code (that should not happen,
- * but best to be sure).
- */
- spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
- tmp = dd->ipath_pd;
- dd->ipath_pd = NULL;
- spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
- for (port = 0; port < dd->ipath_portcnt; port++) {
- struct ipath_portdata *pd = tmp[port];
- tmp[port] = NULL; /* debugging paranoia */
- ipath_free_pddata(dd, pd);
- }
- kfree(tmp);
-}
-
-static void ipath_remove_one(struct pci_dev *pdev)
-{
- struct ipath_devdata *dd = pci_get_drvdata(pdev);
-
- ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
-
- /*
- * disable the IB link early, to be sure no new packets arrive, which
- * complicates the shutdown process
- */
- ipath_shutdown_device(dd);
-
- flush_workqueue(ib_wq);
-
- if (dd->verbs_dev)
- ipath_unregister_ib_device(dd->verbs_dev);
-
- ipath_diag_remove(dd);
- ipath_user_remove(dd);
- ipathfs_remove_device(dd);
- ipath_device_remove_group(&pdev->dev, dd);
-
- ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
- "unit %u\n", dd, (u32) dd->ipath_unit);
-
- cleanup_device(dd);
-
- /*
- * turn off rcv, send, and interrupts for all ports, all drivers
- * should also hard reset the chip here?
- * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
- * for all versions of the driver, if they were allocated
- */
- if (dd->ipath_irq) {
- ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
- dd->ipath_unit, dd->ipath_irq);
- dd->ipath_f_free_irq(dd);
- } else
- ipath_dbg("irq is 0, not doing free_irq "
- "for unit %u\n", dd->ipath_unit);
- /*
- * we check for NULL here, because it's outside
- * the kregbase check, and we need to call it
- * after the free_irq. Thus it's possible that
- * the function pointers were never initialized.
- */
- if (dd->ipath_f_cleanup)
- /* clean up chip-specific stuff */
- dd->ipath_f_cleanup(dd);
-
- ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
- iounmap((volatile void __iomem *) dd->ipath_kregbase);
- pci_release_regions(pdev);
- ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
- pci_disable_device(pdev);
-
- ipath_free_devdata(pdev, dd);
-}
-
-/* general driver use */
-DEFINE_MUTEX(ipath_mutex);
-
-static DEFINE_SPINLOCK(ipath_pioavail_lock);
-
-/**
- * ipath_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the infinipath device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * cancel a range of PIO buffers, used when they might be armed, but
- * not triggered. Used at init to ensure buffer state, and also on user
- * process close, in case it died while writing to a PIO buffer.
- * Also used after errors.
- */
-void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
- unsigned cnt)
-{
- unsigned i, last = first + cnt;
- unsigned long flags;
-
- ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
- for (i = first; i < last; i++) {
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- /*
- * The disarm-related bits are write-only, so it
- * is ok to OR them in with our copy of sendctrl
- * while we hold the lock.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl | INFINIPATH_S_DISARM |
- (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
- /* can't disarm bufs back-to-back per iba7220 spec */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
- /* on some older chips, update may not happen after cancel */
- ipath_force_pio_avail_update(dd);
-}
-
-/**
- * ipath_wait_linkstate - wait for an IB link state change to occur
- * @dd: the infinipath device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * wait up to msecs milliseconds for an IB link state change to occur;
- * for now, take the easy polling route. Currently used only by
- * ipath_set_linkstate. Returns 0 if the state is reached, otherwise
- * -ETIMEDOUT. state can have multiple bits set, to match any of
- * several transitions.
- */
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
-{
- dd->ipath_state_wanted = state;
- wait_event_interruptible_timeout(ipath_state_wait,
- (dd->ipath_flags & state),
- msecs_to_jiffies(msecs));
- dd->ipath_state_wanted = 0;
-
- if (!(dd->ipath_flags & state)) {
- u64 val;
- ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
- " ms\n",
- /* test INIT ahead of DOWN, both can be set */
- (state & IPATH_LINKINIT) ? "INIT" :
- ((state & IPATH_LINKDOWN) ? "DOWN" :
- ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
- msecs);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
- (unsigned long long) ipath_read_kreg64(
- dd, dd->ipath_kregs->kr_ibcctrl),
- (unsigned long long) val,
- ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
- }
- return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
-}
-
-static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
- char *buf, size_t blen)
-{
- static const struct {
- ipath_err_t err;
- const char *msg;
- } errs[] = {
- { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
- { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
- { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
- { INFINIPATH_E_SDMABASE, "SDmaBase" },
- { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
- { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
- { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
- { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
- { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
- { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
- { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
- { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
- };
- int i;
- int expected;
- size_t bidx = 0;
-
- for (i = 0; i < ARRAY_SIZE(errs); i++) {
- expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
- test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
- if ((err & errs[i].err) && !expected)
- bidx += snprintf(buf + bidx, blen - bidx,
- "%s ", errs[i].msg);
- }
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else. Return 1 for "real" errors, otherwise 0 if only packet
- * errors, so the caller can decide what to print with the string.
- */
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
- ipath_err_t err)
-{
- int iserr = 1;
- *buf = '\0';
- if (err & INFINIPATH_E_PKTERRS) {
- if (!(err & ~INFINIPATH_E_PKTERRS))
- iserr = 0; // if only packet errors.
- if (ipath_debug & __IPATH_ERRPKTDBG) {
- if (err & INFINIPATH_E_REBP)
- strlcat(buf, "EBP ", blen);
- if (err & INFINIPATH_E_RVCRC)
- strlcat(buf, "VCRC ", blen);
- if (err & INFINIPATH_E_RICRC) {
- strlcat(buf, "CRC ", blen);
- // clear for check below, so only once
- err &= INFINIPATH_E_RICRC;
- }
- if (err & INFINIPATH_E_RSHORTPKTLEN)
- strlcat(buf, "rshortpktlen ", blen);
- if (err & INFINIPATH_E_SDROPPEDDATAPKT)
- strlcat(buf, "sdroppeddatapkt ", blen);
- if (err & INFINIPATH_E_SPKTLEN)
- strlcat(buf, "spktlen ", blen);
- }
- if ((err & INFINIPATH_E_RICRC) &&
- !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & INFINIPATH_E_RHDRLEN)
- strlcat(buf, "rhdrlen ", blen);
- if (err & INFINIPATH_E_RBADTID)
- strlcat(buf, "rbadtid ", blen);
- if (err & INFINIPATH_E_RBADVERSION)
- strlcat(buf, "rbadversion ", blen);
- if (err & INFINIPATH_E_RHDR)
- strlcat(buf, "rhdr ", blen);
- if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
- strlcat(buf, "sendspecialtrigger ", blen);
- if (err & INFINIPATH_E_RLONGPKTLEN)
- strlcat(buf, "rlongpktlen ", blen);
- if (err & INFINIPATH_E_RMAXPKTLEN)
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & INFINIPATH_E_RMINPKTLEN)
- strlcat(buf, "rminpktlen ", blen);
- if (err & INFINIPATH_E_SMINPKTLEN)
- strlcat(buf, "sminpktlen ", blen);
- if (err & INFINIPATH_E_RFORMATERR)
- strlcat(buf, "rformaterr ", blen);
- if (err & INFINIPATH_E_RUNSUPVL)
- strlcat(buf, "runsupvl ", blen);
- if (err & INFINIPATH_E_RUNEXPCHAR)
- strlcat(buf, "runexpchar ", blen);
- if (err & INFINIPATH_E_RIBFLOW)
- strlcat(buf, "ribflow ", blen);
- if (err & INFINIPATH_E_SUNDERRUN)
- strlcat(buf, "sunderrun ", blen);
- if (err & INFINIPATH_E_SPIOARMLAUNCH)
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & INFINIPATH_E_SDROPPEDSMPPKT)
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & INFINIPATH_E_SMAXPKTLEN)
- strlcat(buf, "smaxpktlen ", blen);
- if (err & INFINIPATH_E_SUNSUPVL)
- strlcat(buf, "sunsupVL ", blen);
- if (err & INFINIPATH_E_INVALIDADDR)
- strlcat(buf, "invalidaddr ", blen);
- if (err & INFINIPATH_E_RRCVEGRFULL)
- strlcat(buf, "rcvegrfull ", blen);
- if (err & INFINIPATH_E_RRCVHDRFULL)
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & INFINIPATH_E_IBSTATUSCHANGED)
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & INFINIPATH_E_RIBLOSTLINK)
- strlcat(buf, "riblostlink ", blen);
- if (err & INFINIPATH_E_HARDWARE)
- strlcat(buf, "hardware ", blen);
- if (err & INFINIPATH_E_RESET)
- strlcat(buf, "reset ", blen);
- if (err & INFINIPATH_E_SDMAERRS)
- decode_sdma_errs(dd, err, buf, blen);
- if (err & INFINIPATH_E_INVALIDEEPCMD)
- strlcat(buf, "invalideepromcmd ", blen);
-done:
- return iserr;
-}
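
decode_sdma_errs() above shows the table-driven form of this bit-to-string decoding, while ipath_decode_err() spells the bits out one strlcat() at a time. A self-contained sketch of the table-driven approach (the bit values and messages here are invented; snprintf() stands in for the kernel's strlcat()):

#include <stdio.h>

struct errname { unsigned long bit; const char *msg; };

static const struct errname errs[] = {
	{ 0x1, "GenMismatch" },
	{ 0x2, "OutOfBound" },
	{ 0x4, "TailOutOfBound" },
};

static void decode(unsigned long err, char *buf, size_t blen)
{
	size_t i, used = 0;

	buf[0] = '\0';
	for (i = 0; i < sizeof(errs) / sizeof(errs[0]); i++) {
		if (!(err & errs[i].bit))
			continue;
		if (used < blen)
			used += snprintf(buf + used, blen - used,
					 "%s ", errs[i].msg);
	}
}

int main(void)
{
	char buf[64];

	decode(0x5, buf, sizeof(buf));
	printf("errors: %s\n", buf);	/* "GenMismatch TailOutOfBound " */
	return 0;
}
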
-
-/**
- * get_rhf_errstring - decode RHF errors
- * @err: the err number
- * @msg: the output buffer
- * @len: the length of the output buffer
- *
- * only used one place now, may want more later
- */
-static void get_rhf_errstring(u32 err, char *msg, size_t len)
-{
- /* start with an empty string, so we don't need to check what's first */
- *msg = '\0';
-
- if (err & INFINIPATH_RHF_H_ICRCERR)
- strlcat(msg, "icrcerr ", len);
- if (err & INFINIPATH_RHF_H_VCRCERR)
- strlcat(msg, "vcrcerr ", len);
- if (err & INFINIPATH_RHF_H_PARITYERR)
- strlcat(msg, "parityerr ", len);
- if (err & INFINIPATH_RHF_H_LENERR)
- strlcat(msg, "lenerr ", len);
- if (err & INFINIPATH_RHF_H_MTUERR)
- strlcat(msg, "mtuerr ", len);
- if (err & INFINIPATH_RHF_H_IHDRERR)
- /* infinipath hdr checksum error */
- strlcat(msg, "ipathhdrerr ", len);
- if (err & INFINIPATH_RHF_H_TIDERR)
- strlcat(msg, "tiderr ", len);
- if (err & INFINIPATH_RHF_H_MKERR)
- /* bad port, offset, etc. */
- strlcat(msg, "invalid ipathhdr ", len);
- if (err & INFINIPATH_RHF_H_IBERR)
- strlcat(msg, "iberr ", len);
- if (err & INFINIPATH_RHF_L_SWA)
- strlcat(msg, "swA ", len);
- if (err & INFINIPATH_RHF_L_SWB)
- strlcat(msg, "swB ", len);
-}
-
-/**
- * ipath_get_egrbuf - get an eager buffer
- * @dd: the infinipath device
- * @bufnum: the eager buffer to get
- *
- * must only be called if ipath_pd[port] is known to be allocated
- */
-static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
-{
- return dd->ipath_port0_skbinfo ?
- (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
-}
-
-/**
- * ipath_alloc_skb - allocate an skb and buffer with possible constraints
- * @dd: the infinipath device
- * @gfp_mask: the sk_buff GFP mask
- */
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u32 len;
-
- /*
-	 * The only fully supported way to handle this is to allocate lots of
-	 * extra space, align as needed, and then do skb_reserve(). That wastes
- * a lot of memory... I'll have to hack this into infinipath_copy
- * also.
- */
-
- /*
- * We need 2 extra bytes for ipath_ether data sent in the
- * key header. In order to keep everything dword aligned,
- * we'll reserve 4 bytes.
- */
- len = dd->ipath_ibmaxlen + 4;
-
- if (dd->ipath_flags & IPATH_4BYTE_TID) {
- /* We need a 2KB multiple alignment, and there is no way
- * to do it except to allocate extra and then skb_reserve
- * enough to bring it up to the right alignment.
- */
- len += 2047;
- }
-
- skb = __dev_alloc_skb(len, gfp_mask);
- if (!skb) {
- ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
- len);
- goto bail;
- }
-
- skb_reserve(skb, 4);
-
- if (dd->ipath_flags & IPATH_4BYTE_TID) {
- u32 una = (unsigned long)skb->data & 2047;
- if (una)
- skb_reserve(skb, 2048 - una);
- }
-
-bail:
- return skb;
-}
-
-static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
- u32 eflags,
- u32 l,
- u32 etail,
- __le32 *rhf_addr,
- struct ipath_message_header *hdr)
-{
- char emsg[128];
-
- get_rhf_errstring(eflags, emsg, sizeof emsg);
- ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
- "tlen=%x opcode=%x egridx=%x: %s\n",
- eflags, l,
- ipath_hdrget_rcv_type(rhf_addr),
- ipath_hdrget_length_in_bytes(rhf_addr),
- be32_to_cpu(hdr->bth[0]) >> 24,
- etail, emsg);
-
- /* Count local link integrity errors. */
- if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
- u8 n = (dd->ipath_ibcctrl >>
- INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-
- if (++dd->ipath_lli_counter > n) {
- dd->ipath_lli_counter = 0;
- dd->ipath_lli_errors++;
- }
- }
-}
-
-/*
- * ipath_kreceive - receive a packet
- * @pd: the infinipath port data
- *
- * Called from the interrupt handler for errors or the receive interrupt.
- */
-void ipath_kreceive(struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- __le32 *rhf_addr;
- void *ebuf;
- const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
- const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
- u32 etail = -1, l, hdrqtail;
- struct ipath_message_header *hdr;
- u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
- static u64 totcalls; /* stats, may eventually remove */
- int last;
-
- l = pd->port_head;
- rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
- if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
- u32 seq = ipath_hdrget_seq(rhf_addr);
-
- if (seq != pd->port_seq_cnt)
- goto bail;
- hdrqtail = 0;
- } else {
- hdrqtail = ipath_get_rcvhdrtail(pd);
- if (l == hdrqtail)
- goto bail;
- smp_rmb();
- }
-
-reloop:
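-	/*
-	 * Walk the rcvhdrq; 'i' counts packets handled this pass, and 'last'
-	 * is set once the sequence/tail check says we have caught up.
-	 */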
- for (last = 0, i = 1; !last; i += !last) {
- hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
- eflags = ipath_hdrget_err_flags(rhf_addr);
- etype = ipath_hdrget_rcv_type(rhf_addr);
- /* total length */
- tlen = ipath_hdrget_length_in_bytes(rhf_addr);
- ebuf = NULL;
- if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
- ipath_hdrget_use_egr_buf(rhf_addr) :
- (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
- /*
- * It turns out that the chip uses an eager buffer
- * for all non-expected packets, whether it "needs"
- * one or not. So always get the index, but don't
- * set ebuf (so we try to copy data) unless the
- * length requires it.
- */
- etail = ipath_hdrget_index(rhf_addr);
- updegr = 1;
- if (tlen > sizeof(*hdr) ||
- etype == RCVHQ_RCV_TYPE_NON_KD)
- ebuf = ipath_get_egrbuf(dd, etail);
- }
-
- /*
- * both tiderr and ipathhdrerr are set for all plain IB
- * packets; only ipathhdrerr should be set.
- */
-
- if (etype != RCVHQ_RCV_TYPE_NON_KD &&
- etype != RCVHQ_RCV_TYPE_ERROR &&
- ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
- IPS_PROTO_VERSION)
- ipath_cdbg(PKT, "Bad InfiniPath protocol version "
- "%x\n", etype);
-
- if (unlikely(eflags))
- ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
- else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
- ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
- if (dd->ipath_lli_counter)
- dd->ipath_lli_counter--;
- } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
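-			/* BTH dword 0 bits 31:24 hold the opcode;
-			 * dword 1 bits 23:0 hold the destination QP. */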
- u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
- u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
- ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
- "qp=%x), len %x; ignored\n",
- etype, opcode, qp, tlen);
- } else if (etype == RCVHQ_RCV_TYPE_EXPECTED) {
- ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
- be32_to_cpu(hdr->bth[0]) >> 24);
- } else {
- /*
- * error packet, type of error unknown.
- * Probably type 3, but we don't know, so don't
- * even try to print the opcode, etc.
- * Usually caused by a "bad packet", that has no
- * BTH, when the LRH says it should.
- */
- ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
- " %x, len %x hdrq+%x rhf: %Lx\n",
- etail, tlen, l, (unsigned long long)
- le64_to_cpu(*(__le64 *) rhf_addr));
- if (ipath_debug & __IPATH_ERRPKTDBG) {
- u32 j, *d, dw = rsize-2;
- if (rsize > (tlen>>2))
- dw = tlen>>2;
- d = (u32 *)hdr;
- printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
- dw);
- for (j = 0; j < dw; j++)
- printk(KERN_DEBUG "%8x%s", d[j],
- (j%8) == 7 ? "\n" : " ");
- printk(KERN_DEBUG ".\n");
- }
- }
- l += rsize;
- if (l >= maxcnt)
- l = 0;
- rhf_addr = (__le32 *) pd->port_rcvhdrq +
- l + dd->ipath_rhf_offset;
- if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
- u32 seq = ipath_hdrget_seq(rhf_addr);
-
- if (++pd->port_seq_cnt > 13)
- pd->port_seq_cnt = 1;
- if (seq != pd->port_seq_cnt)
- last = 1;
- } else if (l == hdrqtail) {
- last = 1;
- }
- /*
-		 * Update head regs on the last packet, and every 16 packets.
-		 * This reduces bus traffic, while still helping to prevent
-		 * rcvhdrq overflows when the queue is nearly full.
- */
- if (last || !(i & 0xf)) {
- u64 lval = l;
-
- /* request IBA6120 and 7220 interrupt only on last */
- if (last)
- lval |= dd->ipath_rhdrhead_intr_off;
- ipath_write_ureg(dd, ur_rcvhdrhead, lval,
- pd->port_port);
- if (updegr) {
- ipath_write_ureg(dd, ur_rcvegrindexhead,
- etail, pd->port_port);
- updegr = 0;
- }
- }
- }
-
- if (!dd->ipath_rhdrhead_intr_off && !reloop &&
- !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-		/* IBA6110 workaround: clearing the chip interrupt can race
-		 * with another interrupt about to be delivered, and with the
-		 * GPIO workaround we can clear it before it is delivered.
-		 * By doing the extra check here for the in-memory tail
-		 * register having been updated while we were handling earlier
-		 * packets, we "almost" guarantee we have covered that case.
-		 */
- u32 hqtail = ipath_get_rcvhdrtail(pd);
- if (hqtail != hdrqtail) {
- hdrqtail = hqtail;
- reloop = 1; /* loop 1 extra time at most */
- goto reloop;
- }
- }
-
- pkttot += i;
-
- pd->port_head = l;
-
- if (pkttot > ipath_stats.sps_maxpkts_call)
- ipath_stats.sps_maxpkts_call = pkttot;
- ipath_stats.sps_port0pkts += pkttot;
- ipath_stats.sps_avgpkts_call =
- ipath_stats.sps_port0pkts / ++totcalls;
-
-bail:;
-}
-
-/**
- * ipath_update_pio_bufs - update shadow copy of the PIO availability map
- * @dd: the infinipath device
- *
- * Called whenever our local copy indicates we have run out of send buffers.
- * NOTE: This can be called from interrupt context by some code paths,
- * and from non-interrupt context by ipath_getpiobuf().
- */
-
-static void ipath_update_pio_bufs(struct ipath_devdata *dd)
-{
- unsigned long flags;
- int i;
- const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
-
- /* If the generation (check) bits have changed, then we update the
- * busy bit for the corresponding PIO buffer. This algorithm will
- * modify positions to the value they already have in some cases
- * (i.e., no change), but it's faster than changing only the bits
- * that have changed.
- *
-	 * We would like to do this atomically, to avoid spinlocks in the
-	 * critical send path, but that's not really possible, given the
-	 * type of changes, and that this routine could be called on
-	 * multiple CPUs simultaneously, so we lock in this routine only,
-	 * to avoid conflicting updates; all we change is the shadow, and
-	 * it's a single 64-bit memory location, so by definition the update
-	 * is atomic in terms of what other CPUs can see when testing the
-	 * bits. The spin_lock overhead isn't too bad, since it only
-	 * happens when all buffers are in use, so only CPU overhead is
-	 * affected, not latency or bandwidth.
- */
- if (!dd->ipath_pioavailregs_dma) {
- ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
- return;
- }
- if (ipath_debug & __IPATH_VERBDBG) {
- /* only if packet debug and verbose */
- volatile __le64 *dma = dd->ipath_pioavailregs_dma;
- unsigned long *shadow = dd->ipath_pioavailshadow;
-
- ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
- "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
- "s3=%lx\n",
- (unsigned long long) le64_to_cpu(dma[0]),
- shadow[0],
- (unsigned long long) le64_to_cpu(dma[1]),
- shadow[1],
- (unsigned long long) le64_to_cpu(dma[2]),
- shadow[2],
- (unsigned long long) le64_to_cpu(dma[3]),
- shadow[3]);
- if (piobregs > 4)
- ipath_cdbg(
- PKT, "2nd group, dma4=%llx shad4=%lx, "
- "d5=%llx s5=%lx, d6=%llx s6=%lx, "
- "d7=%llx s7=%lx\n",
- (unsigned long long) le64_to_cpu(dma[4]),
- shadow[4],
- (unsigned long long) le64_to_cpu(dma[5]),
- shadow[5],
- (unsigned long long) le64_to_cpu(dma[6]),
- shadow[6],
- (unsigned long long) le64_to_cpu(dma[7]),
- shadow[7]);
- }
- spin_lock_irqsave(&ipath_pioavail_lock, flags);
- for (i = 0; i < piobregs; i++) {
- u64 pchbusy, pchg, piov, pnew;
- /*
- * Chip Errata: bug 6641; even and odd qwords>3 are swapped
- */
- if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
- piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
- else
- piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
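-		/* pchg: kernel-owned buffers whose avail bits differ between
-		 * the shadow and the DMA copy from the chip. */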
- pchg = dd->ipath_pioavailkernel[i] &
- ~(dd->ipath_pioavailshadow[i] ^ piov);
- pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
- if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
- pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
- pnew |= piov & pchbusy;
- dd->ipath_pioavailshadow[i] = pnew;
- }
- }
- spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/*
- * Used to force an update of pioavailshadow if we can't get a PIO buffer.
- * Needed primarily when exiting freeze mode after recovering
- * from errors. Done lazily, because it's safer (we know we are not
- * writing PIO buffers).
- */
-static void ipath_reset_availshadow(struct ipath_devdata *dd)
-{
- int i, im;
- unsigned long flags;
-
- spin_lock_irqsave(&ipath_pioavail_lock, flags);
- for (i = 0; i < dd->ipath_pioavregs; i++) {
- u64 val, oldval;
- /* deal with 6110 chip bug on high register #s */
- im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
- i ^ 1 : i;
- val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
- /*
- * busy out the buffers not in the kernel avail list,
- * without changing the generation bits.
- */
- oldval = dd->ipath_pioavailshadow[i];
- dd->ipath_pioavailshadow[i] = val |
- ((~dd->ipath_pioavailkernel[i] <<
- INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
- 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
- if (oldval != dd->ipath_pioavailshadow[i])
- ipath_dbg("shadow[%d] was %Lx, now %lx\n",
- i, (unsigned long long) oldval,
- dd->ipath_pioavailshadow[i]);
- }
- spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/**
- * ipath_setrcvhdrsize - set the receive header size
- * @dd: the infinipath device
- * @rhdrsize: the receive header size
- *
- * called from user init code, and also layered driver init
- */
-int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
-{
- int ret = 0;
-
- if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
- if (dd->ipath_rcvhdrsize != rhdrsize) {
- dev_info(&dd->pcidev->dev,
- "Error: can't set protocol header "
- "size %u, already %u\n",
- rhdrsize, dd->ipath_rcvhdrsize);
- ret = -EAGAIN;
- } else
- ipath_cdbg(VERBOSE, "Reuse same protocol header "
- "size %u\n", dd->ipath_rcvhdrsize);
- } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
- (sizeof(u64) / sizeof(u32)))) {
- ipath_dbg("Error: can't set protocol header size %u "
- "(> max %u)\n", rhdrsize,
- dd->ipath_rcvhdrentsize -
- (u32) (sizeof(u64) / sizeof(u32)));
- ret = -EOVERFLOW;
- } else {
- dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
- dd->ipath_rcvhdrsize = rhdrsize;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
- dd->ipath_rcvhdrsize);
- ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
- dd->ipath_rcvhdrsize);
- }
- return ret;
-}
-
-/*
- * Debugging code and stats updates when no PIO buffers are available.
- */
-static noinline void no_pio_bufs(struct ipath_devdata *dd)
-{
- unsigned long *shadow = dd->ipath_pioavailshadow;
- __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
-
- dd->ipath_upd_pio_shadow = 1;
-
- /*
-	 * Not atomic, but losing an occasional stat count is OK.
- */
- ipath_stats.sps_nopiobufs++;
- if (!(++dd->ipath_consec_nopiobuf % 100000)) {
- ipath_force_pio_avail_update(dd); /* at start */
- ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
- "%llx %llx %llx %llx\n"
- "ipath shadow: %lx %lx %lx %lx\n",
- dd->ipath_consec_nopiobuf,
- (unsigned long)get_cycles(),
- (unsigned long long) le64_to_cpu(dma[0]),
- (unsigned long long) le64_to_cpu(dma[1]),
- (unsigned long long) le64_to_cpu(dma[2]),
- (unsigned long long) le64_to_cpu(dma[3]),
- shadow[0], shadow[1], shadow[2], shadow[3]);
- /*
-		 * 4 buffers per byte, 4 registers printed above; cover the
-		 * rest below
- */
- if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
- (sizeof(shadow[0]) * 4 * 4))
- ipath_dbg("2nd group: dmacopy: "
- "%llx %llx %llx %llx\n"
- "ipath shadow: %lx %lx %lx %lx\n",
- (unsigned long long)le64_to_cpu(dma[4]),
- (unsigned long long)le64_to_cpu(dma[5]),
- (unsigned long long)le64_to_cpu(dma[6]),
- (unsigned long long)le64_to_cpu(dma[7]),
- shadow[4], shadow[5], shadow[6], shadow[7]);
-
- /* at end, so update likely happened */
- ipath_reset_availshadow(dd);
- }
-}
-
-/*
- * Common code for normal driver PIO buffer allocation, and reserved
- * allocation.
- *
- * Does appropriate marking as busy, etc.
- * Returns a pointer to the buffer on success (storing its number in
- * *pbufnum), or NULL if no buffer is available.
- */
-static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
- u32 *pbufnum, u32 first, u32 last, u32 firsti)
-{
- int i, j, updated = 0;
- unsigned piobcnt;
- unsigned long flags;
- unsigned long *shadow = dd->ipath_pioavailshadow;
- u32 __iomem *buf;
-
- piobcnt = last - first;
- if (dd->ipath_upd_pio_shadow) {
- /*
- * Minor optimization. If we had no buffers on last call,
-		 * start out by doing the update; continue and do the scan even
-		 * if no buffers were updated, to be paranoid.
- */
- ipath_update_pio_bufs(dd);
- updated++;
- i = first;
- } else
- i = firsti;
-rescan:
- /*
-	 * While test_and_set_bit() is atomic, we do that and then the
-	 * change_bit(), and the pair is not atomic. See if this is the cause
-	 * of the remaining armlaunch errors.
- */
- spin_lock_irqsave(&ipath_pioavail_lock, flags);
- for (j = 0; j < piobcnt; j++, i++) {
- if (i >= last)
- i = first;
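-		/* two bits per buffer: busy at bit 2*i+1, generation at 2*i */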
- if (__test_and_set_bit((2 * i) + 1, shadow))
- continue;
- /* flip generation bit */
- __change_bit(2 * i, shadow);
- break;
- }
- spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
- if (j == piobcnt) {
- if (!updated) {
- /*
-			 * first time through; shadow exhausted, but there may
-			 * be buffers available, so try an update and then rescan.
- */
- ipath_update_pio_bufs(dd);
- updated++;
- i = first;
- goto rescan;
- } else if (updated == 1 && piobcnt <=
- ((dd->ipath_sendctrl
- >> INFINIPATH_S_UPDTHRESH_SHIFT) &
- INFINIPATH_S_UPDTHRESH_MASK)) {
- /*
-			 * For chips supporting and using the update
-			 * threshold, we need to force an update of the
-			 * in-memory copy if the count is less than the
-			 * threshold, then check one more time.
- */
- ipath_force_pio_avail_update(dd);
- ipath_update_pio_bufs(dd);
- updated++;
- i = first;
- goto rescan;
- }
-
- no_pio_bufs(dd);
- buf = NULL;
- } else {
- if (i < dd->ipath_piobcnt2k)
- buf = (u32 __iomem *) (dd->ipath_pio2kbase +
- i * dd->ipath_palign);
- else
- buf = (u32 __iomem *)
- (dd->ipath_pio4kbase +
- (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
- if (pbufnum)
- *pbufnum = i;
- }
-
- return buf;
-}
-
-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @plen: the size of the PIO buffer needed in 32-bit words
- * @pbufnum: the buffer number is placed here
- */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
-{
- u32 __iomem *buf;
- u32 pnum, nbufs;
- u32 first, lasti;
-
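-	/* large requests come from the 4k buffer pool, which starts at
-	 * index piobcnt2k; small ones scan from the start. */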
- if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
- first = dd->ipath_piobcnt2k;
- lasti = dd->ipath_lastpioindexl;
- } else {
- first = 0;
- lasti = dd->ipath_lastpioindex;
- }
- nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
- buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
-
- if (buf) {
- /*
- * Set next starting place. It's just an optimization,
- * it doesn't matter who wins on this, so no locking
- */
- if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
- dd->ipath_lastpioindexl = pnum + 1;
- else
- dd->ipath_lastpioindex = pnum + 1;
- if (dd->ipath_upd_pio_shadow)
- dd->ipath_upd_pio_shadow = 0;
- if (dd->ipath_consec_nopiobuf)
- dd->ipath_consec_nopiobuf = 0;
- ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
- pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
- if (pbufnum)
- *pbufnum = pnum;
-
- }
- return buf;
-}
-
-/**
- * ipath_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the infinipath device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- */
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
- unsigned len, int avail)
-{
- unsigned long flags;
- unsigned end, cnt = 0;
-
- /* There are two bits per send buffer (busy and generation) */
- start *= 2;
- end = start + len * 2;
-
- spin_lock_irqsave(&ipath_pioavail_lock, flags);
- /* Set or clear the busy bit in the shadow. */
- while (start < end) {
- if (avail) {
- unsigned long dma;
- int i, im;
- /*
- * the BUSY bit will never be set, because we disarm
- * the user buffers before we hand them back to the
- * kernel. We do have to make sure the generation
- * bit is set correctly in shadow, since it could
- * have changed many times while allocated to user.
- * We can't use the bitmap functions on the full
- * dma array because it is always little-endian, so
- * we have to flip to host-order first.
-			 * BITS_PER_LONG is slightly wrong, since it's
-			 * always 64 bits per register in the chip...
-			 * We only work on 64-bit kernels, so that's OK.
- */
- /* deal with 6110 chip bug on high register #s */
- i = start / BITS_PER_LONG;
- im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
- i ^ 1 : i;
- __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
- + start, dd->ipath_pioavailshadow);
- dma = (unsigned long) le64_to_cpu(
- dd->ipath_pioavailregs_dma[im]);
- if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
- + start) % BITS_PER_LONG, &dma))
- __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
- + start, dd->ipath_pioavailshadow);
- else
- __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
- + start, dd->ipath_pioavailshadow);
- __set_bit(start, dd->ipath_pioavailkernel);
- } else {
- __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
- dd->ipath_pioavailshadow);
- __clear_bit(start, dd->ipath_pioavailkernel);
- }
- start += 2;
- }
-
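-	/* cnt = number of send buffers currently marked kernel-available */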
- if (dd->ipath_pioupd_thresh) {
- end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
- cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
- }
- spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
- /*
-	 * When moving buffers from kernel to user, if the number assigned to
-	 * the user is less than the PIO update threshold, and the threshold
-	 * is supported (cnt was computed > 0), drop the update threshold
-	 * so we update at least once per allocated number of buffers.
-	 * In any case, if the kernel buffers are less than the threshold,
-	 * drop the threshold. We don't bother increasing it, having once
-	 * decreased it, since it would typically just cycle back and forth.
-	 * If we don't decrease it below the number of buffers in use, we
-	 * can wait a long time for an update, until some other context
-	 * uses PIO buffers.
- */
- if (!avail && len < cnt)
- cnt = len;
- if (cnt < dd->ipath_pioupd_thresh) {
- dd->ipath_pioupd_thresh = cnt;
- ipath_dbg("Decreased pio update threshold to %u\n",
- dd->ipath_pioupd_thresh);
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
- << INFINIPATH_S_UPDTHRESH_SHIFT);
- dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
- << INFINIPATH_S_UPDTHRESH_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
-}
-
-/**
- * ipath_create_rcvhdrq - create a receive header queue
- * @dd: the infinipath device
- * @pd: the port data
- *
- * This must be contiguous memory (from an I/O perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int ipath_create_rcvhdrq(struct ipath_devdata *dd,
- struct ipath_portdata *pd)
-{
- int ret = 0;
-
- if (!pd->port_rcvhdrq) {
- dma_addr_t phys_hdrqtail;
- gfp_t gfp_flags = GFP_USER | __GFP_COMP;
- int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
- sizeof(u32), PAGE_SIZE);
-
- pd->port_rcvhdrq = dma_alloc_coherent(
- &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
- gfp_flags);
-
- if (!pd->port_rcvhdrq) {
- ipath_dev_err(dd, "attempt to allocate %d bytes "
- "for port %u rcvhdrq failed\n",
- amt, pd->port_port);
- ret = -ENOMEM;
- goto bail;
- }
-
- if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
- pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
- GFP_KERNEL);
- if (!pd->port_rcvhdrtail_kvaddr) {
- ipath_dev_err(dd, "attempt to allocate 1 page "
- "for port %u rcvhdrqtailaddr "
- "failed\n", pd->port_port);
- ret = -ENOMEM;
- dma_free_coherent(&dd->pcidev->dev, amt,
- pd->port_rcvhdrq,
- pd->port_rcvhdrq_phys);
- pd->port_rcvhdrq = NULL;
- goto bail;
- }
- pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
- ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
- "physical\n", pd->port_port,
- (unsigned long long) phys_hdrqtail);
- }
-
- pd->port_rcvhdrq_size = amt;
-
- ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
- "for port %u rcvhdr Q\n",
- amt >> PAGE_SHIFT, pd->port_rcvhdrq,
- (unsigned long) pd->port_rcvhdrq_phys,
- (unsigned long) pd->port_rcvhdrq_size,
- pd->port_port);
- } else {
- ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
- "hdrtailaddr@%p %llx physical\n",
- pd->port_port, pd->port_rcvhdrq,
- (unsigned long long) pd->port_rcvhdrq_phys,
- pd->port_rcvhdrtail_kvaddr, (unsigned long long)
- pd->port_rcvhdrqtailaddr_phys);
- }
- /* clear for security and sanity on each use */
- memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
- if (pd->port_rcvhdrtail_kvaddr)
- memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
-
- /*
- * tell chip each time we init it, even if we are re-using previous
- * memory (we zero the register at process close)
- */
- ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
- pd->port_port, pd->port_rcvhdrqtailaddr_phys);
- ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
- pd->port_port, pd->port_rcvhdrq_phys);
-
-bail:
- return ret;
-}
-
-
-/*
- * Flush all sends that might be in the ready to send state, as well as any
- * that are in the process of being sent. Used whenever we need to be
- * sure the send side is idle. Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo. The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if normal send had happened.
- */
-void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
-{
- unsigned long flags;
-
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
- ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
- goto bail;
- }
- /*
- * If we have SDMA, and it's not disabled, we have to kick off the
- * abort state machine, provided we aren't already aborting.
- * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
-	 * we skip the rest of this routine; the abort is already "in progress".
- */
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
- int skip_cancel;
- unsigned long *statp = &dd->ipath_sdma_status;
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- skip_cancel =
- test_and_set_bit(IPATH_SDMA_ABORTING, statp)
- && !test_bit(IPATH_SDMA_DISABLED, statp);
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
- if (skip_cancel)
- goto bail;
- }
-
- ipath_dbg("Cancelling all in-progress send buffers\n");
-
- /* skip armlaunch errs for a while */
- dd->ipath_lastcancel = jiffies + HZ / 2;
-
- /*
- * The abort bit is auto-clearing. We also don't want pioavail
- * update happening during this, and we don't want any other
- * sends going out, so turn those off for the duration. We read
-	 * the scratch register to be sure that the cancels and the abort
-	 * have taken effect in the chip. Otherwise this is largely the same
-	 * as ipath_force_pio_avail_update().
- */
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
- | INFINIPATH_S_PIOENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl | INFINIPATH_S_ABORT);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- /* disarm all send buffers */
- ipath_disarm_piobufs(dd, 0,
- dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-
- if (restore_sendctrl) {
- /* else done by caller later if needed */
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
- INFINIPATH_S_PIOENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- /* and again, be sure all have hit the chip */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
-
- if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
- !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
- test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- /* only wait so long for intr */
- dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
- dd->ipath_sdma_reset_wait = 200;
- if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
- }
-bail:;
-}
-
-/*
- * Force an update of the in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons. We read the scratch register
- * to make it highly likely that the update will have happened by the
- * time we return. If the update is already off (as in cancel_sends above), this
- * routine is a nop, on the assumption that the caller will "do the
- * right thing".
- */
-void ipath_force_pio_avail_update(struct ipath_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- }
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
- int linitcmd)
-{
- u64 mod_wd;
- static const char *what[4] = {
- [0] = "NOP",
- [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
- [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
- [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
- };
-
- if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- preempt_disable();
- dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
- preempt_enable();
- } else if (linitcmd) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- preempt_disable();
- dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
- preempt_enable();
- }
-
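-	/* merge the link command and link-init command into one IBC control write */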
- mod_wd = (linkcmd << dd->ibcc_lc_shift) |
- (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
- ipath_cdbg(VERBOSE,
- "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
- dd->ipath_unit, what[linkcmd], linitcmd,
- ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl | mod_wd);
- /* read from chip so write is flushed */
- (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-}
-
-int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
-{
- u32 lstate;
- int ret;
-
- switch (newstate) {
- case IPATH_IB_LINKDOWN_ONLY:
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKDOWN:
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
- INFINIPATH_IBCC_LINKINITCMD_POLL);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKDOWN_SLEEP:
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
- INFINIPATH_IBCC_LINKINITCMD_SLEEP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKDOWN_DISABLE:
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
- INFINIPATH_IBCC_LINKINITCMD_DISABLE);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKARM:
- if (dd->ipath_flags & IPATH_LINKARMED) {
- ret = 0;
- goto bail;
- }
- if (!(dd->ipath_flags &
- (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
- ret = -EINVAL;
- goto bail;
- }
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
-
- /*
- * Since the port can transition to ACTIVE by receiving
- * a non VL 15 packet, wait for either state.
- */
- lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
- break;
-
- case IPATH_IB_LINKACTIVE:
- if (dd->ipath_flags & IPATH_LINKACTIVE) {
- ret = 0;
- goto bail;
- }
- if (!(dd->ipath_flags & IPATH_LINKARMED)) {
- ret = -EINVAL;
- goto bail;
- }
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
- lstate = IPATH_LINKACTIVE;
- break;
-
- case IPATH_IB_LINK_LOOPBACK:
- dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
- dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
-
- /* turn heartbeat off, as it causes loopback to fail */
- dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
- IPATH_IB_HRTBT_OFF);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINK_EXTERNAL:
- dev_info(&dd->pcidev->dev,
- "Disabling IB local loopback (normal)\n");
- dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
- IPATH_IB_HRTBT_ON);
- dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- /* don't wait */
- ret = 0;
- goto bail;
-
- /*
-	 * Heartbeat can be explicitly enabled by the user via the
-	 * "hrtbt_enable" file, and if disabled, trying to enable it here
- * will have no effect. Implicit changes (heartbeat off when
- * loopback on, and vice versa) are included to ease testing.
- */
- case IPATH_IB_LINK_HRTBT:
- ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
- IPATH_IB_HRTBT_ON);
- goto bail;
-
- case IPATH_IB_LINK_NO_HRTBT:
- ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
- IPATH_IB_HRTBT_OFF);
- goto bail;
-
- default:
- ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
- ret = -EINVAL;
- goto bail;
- }
- ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
- return ret;
-}
-
-/**
- * ipath_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * We can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size. For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
- u32 piosize;
- int changed = 0;
- int ret;
-
- /*
- * mtu is IB data payload max. It's the largest power of 2 less
- * than piosize (or even larger, since it only really controls the
- * largest we can receive; we can send the max of the mtu and
- * piosize). We check that it's one of the valid IB sizes.
- */
- if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
- (arg != 4096 || !ipath_mtu4096)) {
- ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
- ret = -EINVAL;
- goto bail;
- }
- if (dd->ipath_ibmtu == arg) {
- ret = 0; /* same as current */
- goto bail;
- }
-
- piosize = dd->ipath_ibmaxlen;
- dd->ipath_ibmtu = arg;
-
- if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
- /* Only if it's not the initial value (or reset to it) */
- if (piosize != dd->ipath_init_ibmaxlen) {
- if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
- piosize = dd->ipath_init_ibmaxlen;
- dd->ipath_ibmaxlen = piosize;
- changed = 1;
- }
- } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
- piosize = arg + IPATH_PIO_MAXIBHDR;
- ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
- "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
- arg);
- dd->ipath_ibmaxlen = piosize;
- changed = 1;
- }
-
- if (changed) {
- u64 ibc = dd->ipath_ibcctrl, ibdw;
- /*
- * update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords
- */
- dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
- ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
- ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
- dd->ibcc_mpl_shift);
- ibc |= ibdw << dd->ibcc_mpl_shift;
- dd->ipath_ibcctrl = ibc;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- dd->ipath_f_tidtemplate(dd);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
-{
- dd->ipath_lid = lid;
- dd->ipath_lmc = lmc;
-
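-	/* low 16 bits carry the LID; the upper bits carry the
-	 * mask derived from the LMC. */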
- dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
- (~((1U << lmc) - 1)) << 16);
-
- dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
-
- return 0;
-}
-
-
-/**
- * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
- * @dd: the infinipath device
- * @regno: the register number to write
- * @port: the port containing the register
- * @value: the value to write
- *
- * Registers that are replicated per port (as described by the chip
- * implementation constants) use this routine.
- */
-void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
- unsigned port, u64 value)
-{
- u16 where;
-
- if (port < dd->ipath_portcnt &&
- (regno == dd->ipath_kregs->kr_rcvhdraddr ||
- regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
- where = regno + port;
- else
- where = -1;
-
- ipath_write_kreg(dd, where, value);
-}
-
-/*
- * The following deal with the "obviously simple" task of overriding the state
- * of the LEDs, which normally indicate link physical and logical status.
- * The complications arise in dealing with different hardware mappings,
- * the board-dependent routine being called from interrupts,
- * and the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void ipath_run_led_override(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
- int timeoff;
- int pidx;
- u64 lstate, ltstate, val;
-
- if (!(dd->ipath_flags & IPATH_INITTED))
- return;
-
- pidx = dd->ipath_led_override_phase++ & 1;
- dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
- timeoff = dd->ipath_led_override_timeoff;
-
- /*
-	 * The code below potentially restores the LED values per the current
-	 * status; it should possibly also set up the traffic-blink register,
-	 * but we leave that to per-chip functions.
- */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- ltstate = ipath_ib_linktrstate(dd, val);
- lstate = ipath_ib_linkstate(dd, val);
-
- dd->ipath_f_setextled(dd, lstate, ltstate);
- mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
-}
-
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
-{
- int timeoff, freq;
-
- if (!(dd->ipath_flags & IPATH_INITTED))
- return;
-
- /* First check if we are blinking. If not, use 1HZ polling */
- timeoff = HZ;
- freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
- if (freq) {
- /* For blink, set each phase from one nybble of val */
- dd->ipath_led_override_vals[0] = val & 0xF;
- dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
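-		/* each blink phase lasts (16 * HZ) / freq jiffies */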
- timeoff = (HZ << 4)/freq;
- } else {
- /* Non-blink set both phases the same. */
- dd->ipath_led_override_vals[0] = val & 0xF;
- dd->ipath_led_override_vals[1] = val & 0xF;
- }
- dd->ipath_led_override_timeoff = timeoff;
-
- /*
- * If the timer has not already been started, do so. Use a "quick"
- * timeout so the function will be called soon, to look at our request.
- */
- if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
- /* Need to start timer */
- setup_timer(&dd->ipath_led_override_timer,
- ipath_run_led_override, (unsigned long)dd);
-
- dd->ipath_led_override_timer.expires = jiffies + 1;
- add_timer(&dd->ipath_led_override_timer);
- } else
- atomic_dec(&dd->ipath_led_override_timer_active);
-}
-
-/**
- * ipath_shutdown_device - shut down a device
- * @dd: the infinipath device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled. It does not free any data structures.
- * Everything it does has to be setup again by ipath_init_chip(dd,1)
- */
-void ipath_shutdown_device(struct ipath_devdata *dd)
-{
- unsigned long flags;
-
- ipath_dbg("Shutting down the device\n");
-
- ipath_hol_up(dd); /* make sure user processes aren't suspended */
-
- dd->ipath_flags |= IPATH_LINKUNK;
- dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
- IPATH_LINKINIT | IPATH_LINKARMED |
- IPATH_LINKACTIVE);
- *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
- IPATH_STATUS_IB_READY);
-
- /* mask interrupts, but not errors */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
- dd->ipath_rcvctrl = 0;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
-
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- teardown_sdma(dd);
-
- /*
-	 * Gracefully stop all sends, allowing any in progress to trickle out
- * first.
- */
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl = 0;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- /* flush it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- /*
-	 * Wait long enough for anything that's going to trickle out to have actually
- * done so.
- */
- udelay(5);
-
- dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
-
- ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
- ipath_cancel_sends(dd, 0);
-
- /*
-	 * We are shutting down, so tell components that care. We don't do
-	 * this on just a link state change; much as with ethernet, a cable
-	 * unplug, etc. doesn't change driver state.
- */
- signal_ib_event(dd, IB_EVENT_PORT_ERR);
-
- /* disable IBC */
- dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control | INFINIPATH_C_FREEZEMODE);
-
- /*
-	 * Clear SerdesEnable and turn the LEDs off; do this here because
-	 * we are unloading and can't count on interrupts to move things along.
-	 * Turn the LEDs off explicitly for the same reason.
- */
- dd->ipath_f_quiet_serdes(dd);
-
- /* stop all the timers that might still be running */
- del_timer_sync(&dd->ipath_hol_timer);
- if (dd->ipath_stats_timer_active) {
- del_timer_sync(&dd->ipath_stats_timer);
- dd->ipath_stats_timer_active = 0;
- }
- if (dd->ipath_intrchk_timer.data) {
- del_timer_sync(&dd->ipath_intrchk_timer);
- dd->ipath_intrchk_timer.data = 0;
- }
- if (atomic_read(&dd->ipath_led_override_timer_active)) {
- del_timer_sync(&dd->ipath_led_override_timer);
- atomic_set(&dd->ipath_led_override_timer_active, 0);
- }
-
- /*
- * clear all interrupts and errors, so that the next time the driver
- * is loaded or device is enabled, we know that whatever is set
- * happened while we were unloaded
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
- ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
- ipath_update_eeprom_log(dd);
-}
-
-/**
- * ipath_free_pddata - free a port's allocated data
- * @dd: the infinipath device
- * @pd: the portdata structure
- *
- * Free up any allocated data for a port.
- * This should not touch anything that would affect a simultaneous
- * re-allocation of port data, because it is called after ipath_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- * (The only exception to global state is freeing the port0 port0_skbs.)
- */
-void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
-{
- if (!pd)
- return;
-
- if (pd->port_rcvhdrq) {
- ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
- "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
- (unsigned long) pd->port_rcvhdrq_size);
- dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
- pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
- pd->port_rcvhdrq = NULL;
- if (pd->port_rcvhdrtail_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- pd->port_rcvhdrtail_kvaddr,
- pd->port_rcvhdrqtailaddr_phys);
- pd->port_rcvhdrtail_kvaddr = NULL;
- }
- }
- if (pd->port_port && pd->port_rcvegrbuf) {
- unsigned e;
-
- for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
- void *base = pd->port_rcvegrbuf[e];
- size_t size = pd->port_rcvegrbuf_size;
-
- ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
- "chunk %u/%u\n", base,
- (unsigned long) size,
- e, pd->port_rcvegrbuf_chunks);
- dma_free_coherent(&dd->pcidev->dev, size,
- base, pd->port_rcvegrbuf_phys[e]);
- }
- kfree(pd->port_rcvegrbuf);
- pd->port_rcvegrbuf = NULL;
- kfree(pd->port_rcvegrbuf_phys);
- pd->port_rcvegrbuf_phys = NULL;
- pd->port_rcvegrbuf_chunks = 0;
- } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
- unsigned e;
- struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
-
- dd->ipath_port0_skbinfo = NULL;
- ipath_cdbg(VERBOSE, "free closed port %d "
- "ipath_port0_skbinfo @ %p\n", pd->port_port,
- skbinfo);
- for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
- if (skbinfo[e].skb) {
- pci_unmap_single(dd->pcidev, skbinfo[e].phys,
- dd->ipath_ibmaxlen,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(skbinfo[e].skb);
- }
- vfree(skbinfo);
- }
- kfree(pd->port_tid_pg_list);
- vfree(pd->subport_uregbase);
- vfree(pd->subport_rcvegrbuf);
- vfree(pd->subport_rcvhdr_base);
- kfree(pd);
-}
-
-static int __init infinipath_init(void)
-{
- int ret;
-
- if (ipath_debug & __IPATH_DBG)
- printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
-
- /*
- * These must be called before the driver is registered with
- * the PCI subsystem.
- */
- idr_init(&unit_table);
-
- ret = pci_register_driver(&ipath_driver);
- if (ret < 0) {
- printk(KERN_ERR IPATH_DRV_NAME
- ": Unable to register driver: error %d\n", -ret);
- goto bail_unit;
- }
-
- ret = ipath_init_ipathfs();
- if (ret < 0) {
- printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
- "ipathfs: error %d\n", -ret);
- goto bail_pci;
- }
-
- goto bail;
-
-bail_pci:
- pci_unregister_driver(&ipath_driver);
-
-bail_unit:
- idr_destroy(&unit_table);
-
-bail:
- return ret;
-}
-
-static void __exit infinipath_cleanup(void)
-{
- ipath_exit_ipathfs();
-
- ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
- pci_unregister_driver(&ipath_driver);
-
- idr_destroy(&unit_table);
-}
-
-/**
- * ipath_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload). We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize. For
- * now, we only allow this if no user ports are open that use chip resources
- */
-int ipath_reset_device(int unit)
-{
- int ret, i;
- struct ipath_devdata *dd = ipath_lookup(unit);
- unsigned long flags;
-
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- if (atomic_read(&dd->ipath_led_override_timer_active)) {
- /* Need to stop LED timer, _then_ shut off LEDs */
- del_timer_sync(&dd->ipath_led_override_timer);
- atomic_set(&dd->ipath_led_override_timer_active, 0);
- }
-
- /* Shut off LEDs after we are sure timer is not running */
- dd->ipath_led_override = LED_OVER_BOTH_OFF;
- dd->ipath_f_setextled(dd, 0, 0);
-
- dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
-
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
- dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
- "not initialized or not present\n", unit);
- ret = -ENXIO;
- goto bail;
- }
-
- spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
- if (dd->ipath_pd)
- for (i = 1; i < dd->ipath_cfgports; i++) {
- if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
- continue;
- spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
- ipath_dbg("unit %u port %d is in use "
- "(PID %u cmd %s), can't reset\n",
- unit, i,
- pid_nr(dd->ipath_pd[i]->port_pid),
- dd->ipath_pd[i]->port_comm);
- ret = -EBUSY;
- goto bail;
- }
- spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- teardown_sdma(dd);
-
- dd->ipath_flags &= ~IPATH_INITTED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
- ret = dd->ipath_f_reset(dd);
- if (ret == 1) {
- ipath_dbg("Reinitializing unit %u after reset attempt\n",
- unit);
- ret = ipath_init_chip(dd, 1);
- } else
- ret = -EAGAIN;
- if (ret)
- ipath_dev_err(dd, "Reinitialize unit %u after "
- "reset failed with %d\n", unit, ret);
- else
- dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
- "resetting\n", unit);
-
-bail:
- return ret;
-}
-
-/*
- * Send a signal to all the processes that have the driver open
- * through the normal interfaces (i.e., everything other than the diags
- * interface). Returns the number of signalled processes.
- */
-static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
-{
- int i, sub, any = 0;
- struct pid *pid;
- unsigned long flags;
-
- if (!dd->ipath_pd)
- return 0;
-
- spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
- for (i = 1; i < dd->ipath_cfgports; i++) {
- if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
- continue;
- pid = dd->ipath_pd[i]->port_pid;
- if (!pid)
- continue;
-
- dev_info(&dd->pcidev->dev, "context %d in use "
- "(PID %u), sending signal %d\n",
- i, pid_nr(pid), sig);
- kill_pid(pid, sig, 1);
- any++;
- for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
- pid = dd->ipath_pd[i]->port_subpid[sub];
- if (!pid)
- continue;
- dev_info(&dd->pcidev->dev, "sub-context "
- "%d:%d in use (PID %u), sending "
- "signal %d\n", i, sub, pid_nr(pid), sig);
- kill_pid(pid, sig, 1);
- any++;
- }
- }
- spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
- return any;
-}
-
-static void ipath_hol_signal_down(struct ipath_devdata *dd)
-{
- if (ipath_signal_procs(dd, SIGSTOP))
- ipath_dbg("Stopped some processes\n");
- ipath_cancel_sends(dd, 1);
-}
-
-
-static void ipath_hol_signal_up(struct ipath_devdata *dd)
-{
- if (ipath_signal_procs(dd, SIGCONT))
- ipath_dbg("Continued some processes\n");
-}
-
-/*
- * Link is down: stop any user processes, and flush pending sends
- * to prevent HoL blocking, then start the HoL timer that
- * periodically continues and then stops the processes, so they can
- * detect link down if they want, and do something about it.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void ipath_hol_down(struct ipath_devdata *dd)
-{
- dd->ipath_hol_state = IPATH_HOL_DOWN;
- ipath_hol_signal_down(dd);
- dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
- dd->ipath_hol_timer.expires = jiffies +
- msecs_to_jiffies(ipath_hol_timeout_ms);
- mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
-}
-
-/*
- * Link is up: continue any user processes, and ensure the timer
- * is a nop if running. Let the timer keep running, if set; it
- * will be a nop when it sees the link is up.
- */
-void ipath_hol_up(struct ipath_devdata *dd)
-{
- ipath_hol_signal_up(dd);
- dd->ipath_hol_state = IPATH_HOL_UP;
-}
-
-/*
- * Toggle the running/not-running state of user processes
- * to prevent HoL blocking on chip resources, but still allow
- * user processes to do link-down special case handling.
- * Should only be called via the timer.
- */
-void ipath_hol_event(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
- if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
- && dd->ipath_hol_state != IPATH_HOL_UP) {
- dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
- ipath_dbg("Stopping processes\n");
- ipath_hol_signal_down(dd);
- } else { /* may do "extra" if also in ipath_hol_up() */
- dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
- ipath_dbg("Continuing processes\n");
- ipath_hol_signal_up(dd);
- }
- if (dd->ipath_hol_state == IPATH_HOL_UP)
- ipath_dbg("link's up, don't resched timer\n");
- else {
- dd->ipath_hol_timer.expires = jiffies +
- msecs_to_jiffies(ipath_hol_timeout_ms);
- mod_timer(&dd->ipath_hol_timer,
- dd->ipath_hol_timer.expires);
- }
-}
-
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
-{
- u64 val;
-
- if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
- return -1;
- if (dd->ipath_rx_pol_inv != new_pol_inv) {
- dd->ipath_rx_pol_inv = new_pol_inv;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
- INFINIPATH_XGXS_RX_POL_SHIFT);
- val |= ((u64)dd->ipath_rx_pol_inv) <<
- INFINIPATH_XGXS_RX_POL_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- }
- return 0;
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing on
- * the 7220, which is count-based, rather than trigger-based. Safe for the
- * driver check, since it's at init. Not completely safe when used for
- * user-mode checking, since some error checking can be lost, but not
- * particularly risky, and only has problematic side-effects in the face of
- * very buggy user code. There is no reference counting, but that's also
- * fine, given the intended use.
- */
-void ipath_enable_armlaunch(struct ipath_devdata *dd)
-{
- dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
- INFINIPATH_E_SPIOARMLAUNCH);
- dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- dd->ipath_errormask);
-}
-
-void ipath_disable_armlaunch(struct ipath_devdata *dd)
-{
- /* so don't re-enable if already set */
- dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
- dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- dd->ipath_errormask);
-}
-
-module_init(infinipath_init);
-module_exit(infinipath_cleanup);
diff --git a/drivers/staging/rdma/ipath/ipath_eeprom.c b/drivers/staging/rdma/ipath/ipath_eeprom.c
deleted file mode 100644
index ef84107c7ce0..000000000000
--- a/drivers/staging/rdma/ipath/ipath_eeprom.c
+++ /dev/null
@@ -1,1183 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-
-/*
- * InfiniPath I2C driver for a serial eeprom. This is not a generic
- * I2C interface. For a start, the device we're using (Atmel AT24C11)
- * doesn't work like a regular I2C device. It looks like one
- * electrically, but not logically. Normal I2C devices have a single
- * 7-bit or 10-bit I2C address that they respond to. Valid 7-bit
- * addresses range from 0x03 to 0x77. Addresses 0x00 to 0x02 and 0x78
- * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
- * call" address.) The Atmel device, on the other hand, responds to ALL
- * 7-bit addresses. It's designed to be the only device on a given I2C
- * bus. A 7-bit address corresponds to the memory address within the
- * Atmel device itself.
- *
- * Also, the timing requirements mean more than simple software
- * bitbanging, with readbacks from the chip to ensure timing (simple udelay
- * is not enough).
- *
- * This all means that accessing the device is specialized enough
- * that using the standard kernel I2C bitbanging interface would be
- * impossible. For example, the core I2C eeprom driver expects to find
- * a device at one or more of a limited set of addresses only. It doesn't
- * allow writing to an eeprom. It also doesn't provide any means of
- * accessing eeprom contents from within the kernel, only via sysfs.
- */
-
-/* Added functionality for IBA7220-based cards */
-#define IPATH_EEPROM_DEV_V1 0xA0
-#define IPATH_EEPROM_DEV_V2 0xA2
-#define IPATH_TEMP_DEV 0x98
-#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
-#define IPATH_NO_DEV (0xFF)
-
-/*
- * The number of I2C chains is proliferating. The table below brings
- * some order to the madness. The basic principle is that the
- * table is scanned from the top, and a "probe" is made to the
- * device probe_dev. If that succeeds, the chain is considered
- * to be of that type, and dd->i2c_chain_type is set to the index+1
- * of the entry.
- * The +1 is so static initialization can mean "unknown, do probe."
- */
-static struct i2c_chain_desc {
- u8 probe_dev; /* If seen at probe, chain is this type */
- u8 eeprom_dev; /* Dev addr (if any) for EEPROM */
- u8 temp_dev; /* Dev Addr (if any) for Temp-sense */
-} i2c_chains[] = {
- { IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
- { IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
- { IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
- { IPATH_NO_DEV }
-};
-
-enum i2c_type {
- i2c_line_scl = 0,
- i2c_line_sda
-};
-
-enum i2c_state {
- i2c_line_low = 0,
- i2c_line_high
-};
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_gpio_set - set a GPIO line
- * @dd: the infinipath device
- * @line: the line to set
- * @new_line_state: the state to set
- *
- * Returns 0 if the line was set to the new state successfully, non-zero
- * on error.
- */
-static int i2c_gpio_set(struct ipath_devdata *dd,
- enum i2c_type line,
- enum i2c_state new_line_state)
-{
- u64 out_mask, dir_mask, *gpioval;
- unsigned long flags = 0;
-
- gpioval = &dd->ipath_gpio_out;
-
- if (line == i2c_line_scl) {
- dir_mask = dd->ipath_gpio_scl;
- out_mask = (1UL << dd->ipath_gpio_scl_num);
- } else {
- dir_mask = dd->ipath_gpio_sda;
- out_mask = (1UL << dd->ipath_gpio_sda_num);
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- if (new_line_state == i2c_line_high) {
- /* tri-state the output rather than force high */
- dd->ipath_extctrl &= ~dir_mask;
- } else {
- /* config line to be an output */
- dd->ipath_extctrl |= dir_mask;
- }
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-
- /* set output as well (no real verify) */
- if (new_line_state == i2c_line_high)
- *gpioval |= out_mask;
- else
- *gpioval &= ~out_mask;
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
- return 0;
-}
-
-/**
- * i2c_gpio_get - get a GPIO line state
- * @dd: the infinipath device
- * @line: the line to get
- * @curr_statep: where to put the line state
- *
- * Returns 0 if the line state was read successfully, non-zero
- * on error. *curr_statep is not set on error.
- */
-static int i2c_gpio_get(struct ipath_devdata *dd,
- enum i2c_type line,
- enum i2c_state *curr_statep)
-{
- u64 read_val, mask;
- int ret;
- unsigned long flags = 0;
-
- /* check args */
- if (curr_statep == NULL) {
- ret = 1;
- goto bail;
- }
-
- /* config line to be an input */
- if (line == i2c_line_scl)
- mask = dd->ipath_gpio_scl;
- else
- mask = dd->ipath_gpio_sda;
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- dd->ipath_extctrl &= ~mask;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
- /*
- * Below is very unlikely to reflect true input state if Output
- * Enable actually changed.
- */
- read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
- if (read_val & mask)
- *curr_statep = i2c_line_high;
- else
- *curr_statep = i2c_line_low;
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the infinipath device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip. Since we are delaying anyway, the cost doesn't
- * hurt, and makes the bit twiddling more regular
- */
-static void i2c_wait_for_writes(struct ipath_devdata *dd)
-{
- (void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- rmb();
-}
-
-static void scl_out(struct ipath_devdata *dd, u8 bit)
-{
- udelay(1);
- i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
-
- i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct ipath_devdata *dd, u8 bit)
-{
- i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
-
- i2c_wait_for_writes(dd);
-}
-
-static u8 sda_in(struct ipath_devdata *dd, int wait)
-{
- enum i2c_state bit;
-
- if (i2c_gpio_get(dd, i2c_line_sda, &bit))
- ipath_dbg("get bit failed!\n");
-
- if (wait)
- i2c_wait_for_writes(dd);
-
- return bit == i2c_line_high ? 1U : 0;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the infinipath device
- */
-static int i2c_ackrcv(struct ipath_devdata *dd)
-{
- u8 ack_received;
-
- /* AT ENTRY SCL = LOW */
- /* change direction, ignore data */
- ack_received = sda_in(dd, 1);
- scl_out(dd, i2c_line_high);
- ack_received = sda_in(dd, 1) == 0;
- scl_out(dd, i2c_line_low);
- return ack_received;
-}
-
-/**
- * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
- * @dd: the infinipath device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct ipath_devdata *dd)
-{
- int bit_cntr, data;
-
- data = 0;
-
- for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
- data <<= 1;
- scl_out(dd, i2c_line_high);
- data |= sda_in(dd, 0);
- scl_out(dd, i2c_line_low);
- }
- return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the infinipath device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct ipath_devdata *dd, u8 data)
-{
- int bit_cntr;
- u8 bit;
-
- for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
- bit = (data >> bit_cntr) & 1;
- sda_out(dd, bit);
- scl_out(dd, i2c_line_high);
- scl_out(dd, i2c_line_low);
- }
- return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-static void send_ack(struct ipath_devdata *dd)
-{
- sda_out(dd, i2c_line_low);
- scl_out(dd, i2c_line_high);
- scl_out(dd, i2c_line_low);
- sda_out(dd, i2c_line_high);
-}
-
-/**
- * i2c_startcmd - transmit the start condition, followed by address/cmd
- * @dd: the infinipath device
- * @offset_dir: direction byte
- *
- * (both clock/data high, clock high, data low while clock is high)
- */
-static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
-{
- int res;
-
- /* issue start sequence */
- sda_out(dd, i2c_line_high);
- scl_out(dd, i2c_line_high);
- sda_out(dd, i2c_line_low);
- scl_out(dd, i2c_line_low);
-
-	/* issue offset/address and direction byte */
- res = wr_byte(dd, offset_dir);
-
- if (res)
- ipath_cdbg(VERBOSE, "No ack to complete start\n");
-
- return res;
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the infinipath device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct ipath_devdata *dd)
-{
- scl_out(dd, i2c_line_low);
- sda_out(dd, i2c_line_low);
- scl_out(dd, i2c_line_high);
- sda_out(dd, i2c_line_high);
- udelay(2);
-}
-
-/**
- * eeprom_reset - reset I2C communication
- * @dd: the infinipath device
- */
-
-static int eeprom_reset(struct ipath_devdata *dd)
-{
- int clock_cycles_left = 9;
- u64 *gpioval = &dd->ipath_gpio_out;
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- /* Make sure shadows are consistent */
- dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
- *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
- ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
- "is %llx\n", (unsigned long long) *gpioval);
-
- /*
- * This is to get the i2c into a known state, by first going low,
- * then tristate sda (and then tristate scl as first thing
- * in loop)
- */
- scl_out(dd, i2c_line_low);
- sda_out(dd, i2c_line_high);
-
- /* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
- while (clock_cycles_left--) {
- scl_out(dd, i2c_line_high);
-
- /* SDA seen high, issue START by dropping it while SCL high */
- if (sda_in(dd, 0)) {
- sda_out(dd, i2c_line_low);
- scl_out(dd, i2c_line_low);
- /* ATMEL spec says must be followed by STOP. */
- scl_out(dd, i2c_line_high);
- sda_out(dd, i2c_line_high);
- ret = 0;
- goto bail;
- }
-
- scl_out(dd, i2c_line_low);
- }
-
- ret = 1;
-
-bail:
- return ret;
-}
-
-/*
- * Probe for I2C device at specified address. Returns 0 for "success"
- * to match rest of this file.
- * Leave bus in "reasonable" state for further commands.
- */
-static int i2c_probe(struct ipath_devdata *dd, int devaddr)
-{
- int ret;
-
- ret = eeprom_reset(dd);
- if (ret) {
- ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
- devaddr);
- return ret;
- }
- /*
- * Reset no longer leaves bus in start condition, so normal
- * i2c_startcmd() will do.
- */
- ret = i2c_startcmd(dd, devaddr | READ_CMD);
- if (ret)
- ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
- devaddr);
- else {
- /*
- * Device did respond. Complete a single-byte read, because some
- * devices apparently cannot handle STOP immediately after they
- * ACK the start-cmd.
- */
- int data;
- data = rd_byte(dd);
- stop_cmd(dd);
- ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
- }
- return ret;
-}
-
-/*
- * Returns the "i2c type". This is a pointer to a struct that describes
- * the I2C chain on this board. To minimize impact on struct ipath_devdata,
- * the (small integer) index into the table is actually memoized, rather
- * then the pointer.
- * Memoization is because the type is determined on the first call per chip.
- * An alternative would be to move type determination to early
- * init code.
- */
-static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
-{
- int idx;
-
- /* Get memoized index, from previous successful probes */
- idx = dd->ipath_i2c_chain_type - 1;
- if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
- goto done;
-
- idx = 0;
- while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
- /* if probe succeeds, this is type */
- if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
- break;
- ++idx;
- }
-
- /*
- * Old EEPROM (first entry) may require a reset after probe,
- * rather than being able to "start" after "stop"
- */
- if (idx == 0)
- eeprom_reset(dd);
-
- if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
- idx = -1;
- else
- dd->ipath_i2c_chain_type = idx + 1;
-done:
- return (idx >= 0) ? i2c_chains + idx : NULL;
-}
-
-static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
- u8 eeprom_offset, void *buffer, int len)
-{
- int ret;
- struct i2c_chain_desc *icd;
- u8 *bp = buffer;
-
- ret = 1;
- icd = ipath_i2c_type(dd);
- if (!icd)
- goto bail;
-
- if (icd->eeprom_dev == IPATH_NO_DEV) {
- /* legacy not-really-I2C */
- ipath_cdbg(VERBOSE, "Start command only address\n");
- eeprom_offset = (eeprom_offset << 1) | READ_CMD;
- ret = i2c_startcmd(dd, eeprom_offset);
- } else {
- /* Actual I2C */
- ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
- if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
- ipath_dbg("Failed EEPROM startcmd\n");
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
- ret = wr_byte(dd, eeprom_offset);
- stop_cmd(dd);
- if (ret) {
- ipath_dev_err(dd, "Failed to write EEPROM address\n");
- ret = 1;
- goto bail;
- }
- ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
- }
- if (ret) {
- ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
-
- /*
- * eeprom keeps clocking data out as long as we ack, automatically
- * incrementing the address.
- */
- while (len-- > 0) {
- /* get and store data */
- *bp++ = rd_byte(dd);
- /* send ack if not the last byte */
- if (len)
- send_ack(dd);
- }
-
- stop_cmd(dd);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
- const void *buffer, int len)
-{
- int sub_len;
- const u8 *bp = buffer;
- int max_wait_time, i;
- int ret;
- struct i2c_chain_desc *icd;
-
- ret = 1;
- icd = ipath_i2c_type(dd);
- if (!icd)
- goto bail;
-
- while (len > 0) {
- if (icd->eeprom_dev == IPATH_NO_DEV) {
- if (i2c_startcmd(dd,
- (eeprom_offset << 1) | WRITE_CMD)) {
- ipath_dbg("Failed to start cmd offset %u\n",
- eeprom_offset);
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
- ipath_dbg("Failed EEPROM startcmd\n");
- goto failed_write;
- }
- ret = wr_byte(dd, eeprom_offset);
- if (ret) {
- ipath_dev_err(dd, "Failed to write EEPROM "
- "address\n");
- goto failed_write;
- }
- }
-
- sub_len = min(len, 4);
- eeprom_offset += sub_len;
- len -= sub_len;
-
- for (i = 0; i < sub_len; i++) {
- if (wr_byte(dd, *bp++)) {
- ipath_dbg("no ack after byte %u/%u (%u "
- "total remain)\n", i, sub_len,
- len + sub_len - i);
- goto failed_write;
- }
- }
-
- stop_cmd(dd);
-
- /*
- * wait for write complete by waiting for a successful
- * read (the chip replies with a zero after the write
-		 * cmd completes, and before it writes to the eeprom).
- * The startcmd for the read will fail the ack until
- * the writes have completed. We do this inline to avoid
- * the debug prints that are in the real read routine
- * if the startcmd fails.
- * We also use the proper device address, so it doesn't matter
-		 * whether we have a real eeprom_dev; the legacy part accepts any address.
- */
- max_wait_time = 100;
- while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
- stop_cmd(dd);
- if (!--max_wait_time) {
- ipath_dbg("Did not get successful read to "
- "complete write\n");
- goto failed_write;
- }
- }
- /* now read (and ignore) the resulting byte */
- rd_byte(dd);
- stop_cmd(dd);
- }
-
- ret = 0;
- goto bail;
-
-failed_write:
- stop_cmd(dd);
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: address to read from
- * @buffer: where to store result
- * @len: number of bytes to receive
- */
-int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
- void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- if (!ret) {
- ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
- mutex_unlock(&dd->ipath_eep_lock);
- }
-
- return ret;
-}
-
-/**
- * ipath_eeprom_write - writes data to the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: where to place data
- * @buffer: data to write
- * @len: number of bytes to write
- */
-int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
- const void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- if (!ret) {
- ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
- mutex_unlock(&dd->ipath_eep_lock);
- }
-
- return ret;
-}
-
-static u8 flash_csum(struct ipath_flash *ifp, int adjust)
-{
- u8 *ip = (u8 *) ifp;
- u8 csum = 0, len;
-
- /*
- * Limit length checksummed to max length of actual data.
- * Checksum of erased eeprom will still be bad, but we avoid
- * reading past the end of the buffer we were passed.
- */
- len = ifp->if_length;
- if (len > sizeof(struct ipath_flash))
- len = sizeof(struct ipath_flash);
- while (len--)
- csum += *ip++;
- csum -= ifp->if_csum;
- csum = ~csum;
- if (adjust)
- ifp->if_csum = csum;
-
- return csum;
-}
-
-/**
- * ipath_get_eeprom_info - get the GUID and other info from the eeprom via I2C
- * @dd: the infinipath device
- *
- * We have the capability to use the ipath_nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void ipath_get_eeprom_info(struct ipath_devdata *dd)
-{
- void *buf;
- struct ipath_flash *ifp;
- __be64 guid;
- int len, eep_stat;
- u8 csum, *bguid;
- int t = dd->ipath_unit;
- struct ipath_devdata *dd0 = ipath_lookup(0);
-
- if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
- u8 oguid;
- dd->ipath_guid = dd0->ipath_guid;
- bguid = (u8 *) & dd->ipath_guid;
-
- oguid = bguid[7];
- bguid[7] += t;
- if (oguid > bguid[7]) {
- if (bguid[6] == 0xff) {
- if (bguid[5] == 0xff) {
- ipath_dev_err(
- dd,
- "Can't set %s GUID from "
- "base, wraps to OUI!\n",
- ipath_get_unit_name(t));
- dd->ipath_guid = 0;
- goto bail;
- }
- bguid[5]++;
- }
- bguid[6]++;
- }
- dd->ipath_nguid = 1;
-
- ipath_dbg("nguid %u, so adding %u to device 0 guid, "
- "for %llx\n",
- dd0->ipath_nguid, t,
- (unsigned long long) be64_to_cpu(dd->ipath_guid));
- goto bail;
- }
-
- /*
- * read full flash, not just currently used part, since it may have
- * been written with a newer definition
-	 */
- len = sizeof(struct ipath_flash);
- buf = vmalloc(len);
- if (!buf) {
- ipath_dev_err(dd, "Couldn't allocate memory to read %u "
- "bytes from eeprom for GUID\n", len);
- goto bail;
- }
-
- mutex_lock(&dd->ipath_eep_lock);
- eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
- mutex_unlock(&dd->ipath_eep_lock);
-
- if (eep_stat) {
- ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
- goto done;
- }
- ifp = (struct ipath_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
- "0x%x, not 0x%x\n", csum, ifp->if_csum);
- goto done;
- }
- if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
- *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
- ipath_dev_err(dd, "Invalid GUID %llx from flash; "
- "ignoring\n",
- *(unsigned long long *) ifp->if_guid);
- /* don't allow GUID if all 0 or all 1's */
- goto done;
- }
-
- /* complain, but allow it */
- if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
- dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
- "default, probably not correct!\n",
- *(unsigned long long *) ifp->if_guid);
-
- bguid = ifp->if_guid;
- if (!bguid[0] && !bguid[1] && !bguid[2]) {
- /* original incorrect GUID format in flash; fix in
- * core copy, by shifting up 2 octets; don't need to
- * change top octet, since both it and shifted are
- * 0.. */
- bguid[1] = bguid[3];
- bguid[2] = bguid[4];
- bguid[3] = bguid[4] = 0;
- guid = *(__be64 *) ifp->if_guid;
- ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
- "shifting 2 octets\n");
- } else
- guid = *(__be64 *) ifp->if_guid;
- dd->ipath_guid = guid;
- dd->ipath_nguid = ifp->if_numguid;
- /*
- * Things are slightly complicated by the desire to transparently
- * support both the Pathscale 10-digit serial number and the QLogic
- * 13-character version.
- */
- if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
- && ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
- /* This board has a Serial-prefix, which is stored
- * elsewhere for backward-compatibility.
- */
- char *snp = dd->ipath_serial;
- memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
- snp[sizeof ifp->if_sprefix] = '\0';
- len = strlen(snp);
- snp += len;
- len = (sizeof dd->ipath_serial) - len;
- if (len > sizeof ifp->if_serial) {
- len = sizeof ifp->if_serial;
- }
- memcpy(snp, ifp->if_serial, len);
- } else
- memcpy(dd->ipath_serial, ifp->if_serial,
- sizeof ifp->if_serial);
- if (!strstr(ifp->if_comment, "Tested successfully"))
- ipath_dev_err(dd, "Board SN %s did not pass functional "
- "test: %s\n", dd->ipath_serial,
- ifp->if_comment);
-
- ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
- (unsigned long long) be64_to_cpu(dd->ipath_guid));
-
- memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
- /*
- * Power-on (actually "active") hours are kept as little-endian value
- * in EEPROM, but as seconds in a (possibly as small as 24-bit)
- * atomic_t while running.
- */
- atomic_set(&dd->ipath_active_time, 0);
- dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
-done:
- vfree(buf);
-
-bail:;
-}
-
-/**
- * ipath_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the infinipath device
- *
- * Although the time is kept as seconds in the ipath_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct ipath_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-
-int ipath_update_eeprom_log(struct ipath_devdata *dd)
-{
- void *buf;
- struct ipath_flash *ifp;
- int len, hi_water;
- uint32_t new_time, new_hrs;
- u8 csum;
- int ret, idx;
- unsigned long flags;
-
- /* first, check if we actually need to do anything. */
- ret = 0;
- for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
- if (dd->ipath_eep_st_new_errs[idx]) {
- ret = 1;
- break;
- }
- }
- new_time = atomic_read(&dd->ipath_active_time);
-
- if (ret == 0 && new_time < 3600)
- return 0;
-
- /*
- * The quick-check above determined that there is something worthy
-	 * of logging, so get current contents and take a more detailed look.
- * read full flash, not just currently used part, since it may have
- * been written with a newer definition
- */
- len = sizeof(struct ipath_flash);
- buf = vmalloc(len);
- ret = 1;
- if (!buf) {
- ipath_dev_err(dd, "Couldn't allocate memory to read %u "
- "bytes from eeprom for logging\n", len);
- goto bail;
- }
-
- /* Grab semaphore and read current EEPROM. If we get an
- * error, let go, but if not, keep it until we finish write.
- */
- ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- if (ret) {
- ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
- goto free_bail;
- }
- ret = ipath_eeprom_internal_read(dd, 0, buf, len);
- if (ret) {
- mutex_unlock(&dd->ipath_eep_lock);
-		ipath_dev_err(dd, "Unable to read EEPROM for logging\n");
- goto free_bail;
- }
- ifp = (struct ipath_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- mutex_unlock(&dd->ipath_eep_lock);
- ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
- csum, ifp->if_csum);
- ret = 1;
- goto free_bail;
- }
- hi_water = 0;
- spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
- for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
- int new_val = dd->ipath_eep_st_new_errs[idx];
- if (new_val) {
- /*
- * If we have seen any errors, add to EEPROM values
- * We need to saturate at 0xFF (255) and we also
- * would need to adjust the checksum if we were
- * trying to minimize EEPROM traffic
- * Note that we add to actual current count in EEPROM,
- * in case it was altered while we were running.
- */
- new_val += ifp->if_errcntp[idx];
- if (new_val > 0xFF)
- new_val = 0xFF;
- if (ifp->if_errcntp[idx] != new_val) {
- ifp->if_errcntp[idx] = new_val;
- hi_water = offsetof(struct ipath_flash,
- if_errcntp) + idx;
- }
- /*
- * update our shadow (used to minimize EEPROM
- * traffic), to match what we are about to write.
- */
- dd->ipath_eep_st_errs[idx] = new_val;
- dd->ipath_eep_st_new_errs[idx] = 0;
- }
- }
- /*
- * now update active-time. We would like to round to the nearest hour
- * but unless atomic_t are sure to be proper signed ints we cannot,
- * because we need to account for what we "transfer" to EEPROM and
- * if we log an hour at 31 minutes, then we would need to set
- * active_time to -29 to accurately count the _next_ hour.
- */
- if (new_time >= 3600) {
- new_hrs = new_time / 3600;
- atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
- new_hrs += dd->ipath_eep_hrs;
- if (new_hrs > 0xFFFF)
- new_hrs = 0xFFFF;
- dd->ipath_eep_hrs = new_hrs;
- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
- ifp->if_powerhour[0] = new_hrs & 0xFF;
- hi_water = offsetof(struct ipath_flash, if_powerhour);
- }
- if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
- ifp->if_powerhour[1] = new_hrs >> 8;
- hi_water = offsetof(struct ipath_flash, if_powerhour)
- + 1;
- }
- }
- /*
- * There is a tiny possibility that we could somehow fail to write
- * the EEPROM after updating our shadows, but problems from holding
- * the spinlock too long are a much bigger issue.
- */
- spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
- if (hi_water) {
-		/* we made some change to the data, update cksum and write */
- csum = flash_csum(ifp, 1);
- ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
- }
- mutex_unlock(&dd->ipath_eep_lock);
- if (ret)
- ipath_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
- vfree(buf);
-bail:
- return ret;
-
-}
-
-/**
- * ipath_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the infinipath device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
-{
- uint new_val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
- new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
- if (new_val > 255)
- new_val = 255;
- dd->ipath_eep_st_new_errs[eidx] = new_val;
- spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
- return;
-}
-
-static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
-{
- int ret;
- struct i2c_chain_desc *icd;
-
- ret = -ENOENT;
-
- icd = ipath_i2c_type(dd);
- if (!icd)
- goto bail;
-
- if (icd->temp_dev == IPATH_NO_DEV) {
- /* tempsense only exists on new, real-I2C boards */
- ret = -ENXIO;
- goto bail;
- }
-
- if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
- ipath_dbg("Failed tempsense startcmd\n");
- stop_cmd(dd);
- ret = -ENXIO;
- goto bail;
- }
- ret = wr_byte(dd, regnum);
- stop_cmd(dd);
- if (ret) {
- ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
- regnum);
- ret = -ENXIO;
- goto bail;
- }
- if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
- ipath_dbg("Failed tempsense RD startcmd\n");
- stop_cmd(dd);
- ret = -ENXIO;
- goto bail;
- }
- /*
-	 * We can sensibly clock out only one byte per command
- */
- ret = rd_byte(dd);
- stop_cmd(dd);
-
-bail:
- return ret;
-}
-
-#define VALID_TS_RD_REG_MASK 0xBF
-
-/**
- * ipath_tempsense_read - read register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
-{
- int ret;
-
- if (regnum > 7)
- return -EINVAL;
-
- /* return a bogus value for (the one) register we do not have */
- if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
- return 0;
-
- ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- if (!ret) {
- ret = ipath_tempsense_internal_read(dd, regnum);
- mutex_unlock(&dd->ipath_eep_lock);
- }
-
- /*
- * There are three possibilities here:
- * ret is actual value (0..255)
- * ret is -ENXIO or -EINVAL from code in this file
- * ret is -EINTR from mutex_lock_interruptible.
- */
- return ret;
-}
-
-static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
- u8 regnum, u8 data)
-{
- int ret = -ENOENT;
- struct i2c_chain_desc *icd;
-
- icd = ipath_i2c_type(dd);
- if (!icd)
- goto bail;
-
- if (icd->temp_dev == IPATH_NO_DEV) {
- /* tempsense only exists on new, real-I2C boards */
- ret = -ENXIO;
- goto bail;
- }
- if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
- ipath_dbg("Failed tempsense startcmd\n");
- stop_cmd(dd);
- ret = -ENXIO;
- goto bail;
- }
- ret = wr_byte(dd, regnum);
- if (ret) {
- stop_cmd(dd);
- ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
- regnum);
- ret = -ENXIO;
- goto bail;
- }
- ret = wr_byte(dd, data);
- stop_cmd(dd);
- ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
- if (ret) {
- ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
- regnum);
- ret = -ENXIO;
- }
-
-bail:
- return ret;
-}
-
-#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
-
-/**
- * ipath_tempsense_write - write register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to write
- * @data: data to write
- *
- * returns 0 for success or < 0 for error
- */
-int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
-{
- int ret;
-
- if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
- return -EINVAL;
-
- ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
- if (!ret) {
- ret = ipath_tempsense_internal_write(dd, regnum, data);
- mutex_unlock(&dd->ipath_eep_lock);
- }
-
- /*
- * There are three possibilities here:
- * ret is 0 for success
- * ret is -ENXIO or -EINVAL from code in this file
- * ret is -EINTR from mutex_lock_interruptible.
- */
- return ret;
-}
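A minimal sketch of the two addressing modes the code above distinguishes: the legacy part folds the EEPROM offset into the byte sent after START, while real-I2C parts use a fixed device address and send the offset as a separate data byte before a repeated START. The helper names below are illustrative only, not from the removed driver:

#include <linux/types.h>

#define READ_CMD	1
#define WRITE_CMD	0

/* Legacy (pre-IBA7220) boards: the AT24C11 answers every 7-bit address,
 * so the "address" byte is really the EEPROM offset plus the R/W bit. */
static inline u8 legacy_addr_byte(u8 eeprom_offset, int read)
{
	return (u8)((eeprom_offset << 1) | (read ? READ_CMD : WRITE_CMD));
}

/* Real-I2C boards: a fixed device address (0xA0/0xA2 for the EEPROM,
 * 0x98 for the temp sensor) selects the part; the register/EEPROM offset
 * is then written as a data byte before switching to the read phase. */
static inline u8 i2c_addr_byte(u8 dev_addr, int read)
{
	return (u8)(dev_addr | (read ? READ_CMD : WRITE_CMD));
}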
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
deleted file mode 100644
index 6187b848b3ca..000000000000
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/cpu.h>
-#include <linux/uio.h>
-#include <asm/pgtable.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-#include "ipath_user_sdma.h"
-
-static int ipath_open(struct inode *, struct file *);
-static int ipath_close(struct inode *, struct file *);
-static ssize_t ipath_write(struct file *, const char __user *, size_t,
- loff_t *);
-static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
-static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
-static int ipath_mmap(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics. Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations ipath_file_ops = {
- .owner = THIS_MODULE,
- .write = ipath_write,
- .write_iter = ipath_write_iter,
- .open = ipath_open,
- .release = ipath_close,
- .poll = ipath_poll,
- .mmap = ipath_mmap,
- .llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
- struct page *page;
- u64 paddr = 0;
-
- page = vmalloc_to_page(p);
- if (page)
- paddr = page_to_pfn(page) << PAGE_SHIFT;
-
- return paddr;
-}
-
-static int ipath_get_base_info(struct file *fp,
- void __user *ubase, size_t ubase_size)
-{
- struct ipath_portdata *pd = port_fp(fp);
- int ret = 0;
- struct ipath_base_info *kinfo = NULL;
- struct ipath_devdata *dd = pd->port_dd;
- unsigned subport_cnt;
- int shared, master;
- size_t sz;
-
- subport_cnt = pd->port_subport_cnt;
- if (!subport_cnt) {
- shared = 0;
- master = 0;
- subport_cnt = 1;
- } else {
- shared = 1;
- master = !subport_fp(fp);
- }
-
- sz = sizeof(*kinfo);
- /* If port sharing is not requested, allow the old size structure */
- if (!shared)
- sz -= 7 * sizeof(u64);
- if (ubase_size < sz) {
- ipath_cdbg(PROC,
- "Base size %zu, need %zu (version mismatch?)\n",
- ubase_size, sz);
- ret = -EINVAL;
- goto bail;
- }
-
- kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
- if (kinfo == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ret = dd->ipath_f_get_base_info(pd, kinfo);
- if (ret < 0)
- goto bail;
-
- kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
- kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
- kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
- kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
- /*
- * have to mmap whole thing
- */
- kinfo->spi_rcv_egrbuftotlen =
- pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
- kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
- kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
- pd->port_rcvegrbuf_chunks;
- kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
- if (master)
- kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
- /*
- * for this use, may be ipath_cfgports summed over all chips that
-	 * are configured and present
- */
- kinfo->spi_nports = dd->ipath_cfgports;
- /* unit (chip/board) our port is on */
- kinfo->spi_unit = dd->ipath_unit;
- /* for now, only a single page */
- kinfo->spi_tid_maxsize = PAGE_SIZE;
-
- /*
- * Doing this per port, and based on the skip value, etc. This has
- * to be the actual buffer size, since the protocol code treats it
- * as an array.
- *
- * These have to be set to user addresses in the user code via mmap.
- * These values are used on return to user code for the mmap target
- * addresses only. For 32 bit, same 44 bit address problem, so use
- * the physical address, not virtual. Before 2.6.11, using the
- * page_address() macro worked, but in 2.6.11, even that returns the
- * full 64 bit address (upper bits all 1's). So far, using the
- * physical addresses (or chip offsets, for chip mapping) works, but
- * no doubt some future kernel release will change that, and we'll be
- * on to yet another method of dealing with this.
- */
- kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
- kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
- kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
- kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
- kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
- (void *) dd->ipath_statusp -
- (void *) dd->ipath_pioavailregs_dma;
- if (!shared) {
- kinfo->spi_piocnt = pd->port_piocnt;
- kinfo->spi_piobufbase = (u64) pd->port_piobufs;
- kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
- dd->ipath_ureg_align * pd->port_port;
- } else if (master) {
- kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
- (pd->port_piocnt % subport_cnt);
- /* Master's PIO buffers are after all the slave's */
- kinfo->spi_piobufbase = (u64) pd->port_piobufs +
- dd->ipath_palign *
- (pd->port_piocnt - kinfo->spi_piocnt);
- } else {
- unsigned slave = subport_fp(fp) - 1;
-
- kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
- kinfo->spi_piobufbase = (u64) pd->port_piobufs +
- dd->ipath_palign * kinfo->spi_piocnt * slave;
- }
-
- if (shared) {
- kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
- dd->ipath_ureg_align * pd->port_port;
- kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
- kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
- kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
-
- kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
- PAGE_SIZE * subport_fp(fp));
-
- kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
- pd->port_rcvhdrq_size * subport_fp(fp));
- kinfo->spi_rcvhdr_tailaddr = 0;
- kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
- pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
- subport_fp(fp));
-
- kinfo->spi_subport_uregbase =
- cvt_kvaddr(pd->subport_uregbase);
- kinfo->spi_subport_rcvegrbuf =
- cvt_kvaddr(pd->subport_rcvegrbuf);
- kinfo->spi_subport_rcvhdr_base =
- cvt_kvaddr(pd->subport_rcvhdr_base);
- ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
- kinfo->spi_port, kinfo->spi_runtime_flags,
- (unsigned long long) kinfo->spi_subport_uregbase,
- (unsigned long long) kinfo->spi_subport_rcvegrbuf,
- (unsigned long long) kinfo->spi_subport_rcvhdr_base);
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work.
- */
- kinfo->spi_pioindex = (kinfo->spi_piobufbase -
- (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
- kinfo->spi_pioalign = dd->ipath_palign;
-
- kinfo->spi_qpair = IPATH_KD_QP;
- /*
- * user mode PIO buffers are always 2KB, even when 4KB can
- * be received, and sent via the kernel; this is ibmaxlen
- * for 2K MTU.
- */
- kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
- kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
- kinfo->spi_port = pd->port_port;
- kinfo->spi_subport = subport_fp(fp);
- kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
- kinfo->spi_hw_version = dd->ipath_revision;
-
- if (master) {
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
- }
-
- sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
- if (copy_to_user(ubase, kinfo, sz))
- ret = -EFAULT;
-
-bail:
- kfree(kinfo);
- return ret;
-}
-
-/**
- * ipath_tid_update - update a port TID
- * @pd: the port
- * @fp: the ipath device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller. To make it easier to
- * catch bugs, and to reduce search time, we keep a cursor for
- * each port, walking the shadow tid array to find one that's not
- * in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
- const struct ipath_tid_info *ti)
-{
- int ret = 0, ntids;
- u32 tid, porttid, cnt, i, tidcnt, tidoff;
- u16 *tidlist;
- struct ipath_devdata *dd = pd->port_dd;
- u64 physaddr;
- unsigned long vaddr;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
- struct page **pagep = NULL;
- unsigned subport = subport_fp(fp);
-
- if (!dd->ipath_pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- cnt = ti->tidcnt;
- if (!cnt) {
- ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
- (unsigned long long) ti->tidlist);
- /*
-		 * Should we treat this as success? Likely a bug
- */
- ret = -EFAULT;
- goto done;
- }
- porttid = pd->port_port * dd->ipath_rcvtidcnt;
- if (!pd->port_subport_cnt) {
- tidcnt = dd->ipath_rcvtidcnt;
- tid = pd->port_tidcursor;
- tidoff = 0;
- } else if (!subport) {
- tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
- (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
- tidoff = dd->ipath_rcvtidcnt - tidcnt;
- porttid += tidoff;
- tid = tidcursor_fp(fp);
- } else {
- tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
- tidoff = tidcnt * (subport - 1);
- porttid += tidoff;
- tid = tidcursor_fp(fp);
- }
- if (cnt > tidcnt) {
- /* make sure it all fits in port_tid_pg_list */
- dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
- "TIDs, only trying max (%u)\n", cnt, tidcnt);
- cnt = tidcnt;
- }
- pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
- tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];
-
- memset(tidmap, 0, sizeof(tidmap));
- /* before decrement; chip actual # */
- ntids = tidcnt;
- tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- porttid * sizeof(*tidbase));
-
- ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
- pd->port_port, cnt, tid, tidbase);
-
- /* virtual address of first page in transfer */
- vaddr = ti->tidvaddr;
- if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
- cnt * PAGE_SIZE)) {
- ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
- (void *)vaddr, cnt);
- ret = -EFAULT;
- goto done;
- }
- ret = ipath_get_user_pages(vaddr, cnt, pagep);
- if (ret) {
- if (ret == -EBUSY) {
- ipath_dbg("Failed to lock addr %p, %u pages "
- "(already locked)\n",
- (void *) vaddr, cnt);
- /*
- * for now, continue, and see what happens but with
- * the new implementation, this should never happen,
- * unless perhaps the user has mpin'ed the pages
- * themselves (something we need to test)
- */
- ret = 0;
- } else {
- dev_info(&dd->pcidev->dev,
- "Failed to lock addr %p, %u pages: "
- "errno %d\n", (void *) vaddr, cnt, -ret);
- goto done;
- }
- }
- for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
- for (; ntids--; tid++) {
- if (tid == tidcnt)
- tid = 0;
- if (!dd->ipath_pageshadow[porttid + tid])
- break;
- }
- if (ntids < 0) {
- /*
- * oops, wrapped all the way through their TIDs,
- * and didn't have enough free; see comments at
- * start of routine
- */
- ipath_dbg("Not enough free TIDs for %u pages "
- "(index %d), failing\n", cnt, i);
- i--; /* last tidlist[i] not filled in */
- ret = -ENOMEM;
- break;
- }
- tidlist[i] = tid + tidoff;
- ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
- "vaddr %lx\n", i, tid + tidoff, vaddr);
- /* we "know" system pages and TID pages are same size */
- dd->ipath_pageshadow[porttid + tid] = pagep[i];
- dd->ipath_physshadow[porttid + tid] = ipath_map_page(
- dd->pcidev, pagep[i], 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- /*
- * don't need atomic or it's overhead
- */
- __set_bit(tid, tidmap);
- physaddr = dd->ipath_physshadow[porttid + tid];
- ipath_stats.sps_pagelocks++;
- ipath_cdbg(VERBOSE,
- "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
- tid, vaddr, (unsigned long long) physaddr,
- pagep[i]);
- dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
- physaddr);
- /*
- * don't check this tid in ipath_portshadow, since we
- * just filled it in; start with the next one.
- */
- tid++;
- }
-
- if (ret) {
- u32 limit;
- cleanup:
- /* jump here if copy out of updated info failed... */
- ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
- -ret, i, cnt);
- /* same code that's in ipath_free_tid() */
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit((const unsigned long *)tidmap, limit);
- for (; tid < limit; tid++) {
- if (!test_bit(tid, tidmap))
- continue;
- if (dd->ipath_pageshadow[porttid + tid]) {
- ipath_cdbg(VERBOSE, "Freeing TID %u\n",
- tid);
- dd->ipath_f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED,
- dd->ipath_tidinvalid);
- pci_unmap_page(dd->pcidev,
- dd->ipath_physshadow[porttid + tid],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- dd->ipath_pageshadow[porttid + tid] = NULL;
- ipath_stats.sps_pageunlocks++;
- }
- }
- ipath_release_user_pages(pagep, cnt);
- } else {
- /*
- * Copy the updated array, with ipath_tid's filled in, back
- * to user. Since we did the copy in already, this "should
-		 * never fail". If it does, we have to clean up...
- */
- if (copy_to_user((void __user *)
- (unsigned long) ti->tidlist,
- tidlist, cnt * sizeof(*tidlist))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
- tidmap, sizeof tidmap)) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (tid == tidcnt)
- tid = 0;
- if (!pd->port_subport_cnt)
- pd->port_tidcursor = tid;
- else
- tidcursor_fp(fp) = tid;
- }
-
-done:
- if (ret)
- ipath_dbg("Failed to map %u TID pages, failing with %d\n",
- ti->tidcnt, -ret);
- return ret;
-}
-
-/**
- * ipath_tid_free - free a port TID
- * @pd: the port
- * @subport: the subport
- * @ti: the TID info
- *
- * right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance. We check that the TID is in range for this port
- * but otherwise don't check validity; if user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted. We do check that the TID was in use, for sanity.
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-
-static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
- const struct ipath_tid_info *ti)
-{
- int ret = 0;
- u32 tid, porttid, cnt, limit, tidcnt;
- struct ipath_devdata *dd = pd->port_dd;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
-
- if (!dd->ipath_pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
- sizeof tidmap)) {
- ret = -EFAULT;
- goto done;
- }
-
- porttid = pd->port_port * dd->ipath_rcvtidcnt;
- if (!pd->port_subport_cnt)
- tidcnt = dd->ipath_rcvtidcnt;
- else if (!subport) {
- tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
- (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
- porttid += dd->ipath_rcvtidcnt - tidcnt;
- } else {
- tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
- porttid += tidcnt * (subport - 1);
- }
- tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- porttid * sizeof(*tidbase));
-
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit(tidmap, limit);
- ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
- "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
- limit, tid, porttid);
- for (cnt = 0; tid < limit; tid++) {
- /*
- * small optimization; if we detect a run of 3 or so without
- * any set, use find_first_bit again. That's mainly to
- * accelerate the case where we wrapped, so we have some at
- * the beginning, and some at the end, and a big gap
- * in the middle.
- */
- if (!test_bit(tid, tidmap))
- continue;
- cnt++;
- if (dd->ipath_pageshadow[porttid + tid]) {
- struct page *p;
- p = dd->ipath_pageshadow[porttid + tid];
- dd->ipath_pageshadow[porttid + tid] = NULL;
- ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
- pid_nr(pd->port_pid), tid);
- dd->ipath_f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED,
- dd->ipath_tidinvalid);
- pci_unmap_page(dd->pcidev,
- dd->ipath_physshadow[porttid + tid],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- ipath_release_user_pages(&p, 1);
- ipath_stats.sps_pageunlocks++;
- } else
- ipath_dbg("Unused tid %u, ignoring\n", tid);
- }
- if (cnt != ti->tidcnt)
- ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
- ti->tidcnt, cnt);
-done:
- if (ret)
- ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
- ti->tidcnt, -ret);
- return ret;
-}
-
-/**
- * ipath_set_part_key - set a partition key
- * @pd: the port
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed). This is somewhat tricky, since multiple ports may set
- * the same key, so we reference count them, and clean up at exit. All 4
- * partition keys are packed into a single infinipath register. It's an
- * error for a process to set the same pkey multiple times. We provide no
- * mechanism to de-allocate a pkey at this time, we may eventually need to
- * do that. I've used the atomic operations, and no locking, and only make
- * a single pass through what's available. This should be more than
- * adequate for some time. I'll think about spinlocks or the like if and as
- * it's necessary.
- */
-static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
-{
- struct ipath_devdata *dd = pd->port_dd;
- int i, any = 0, pidx = -1;
- u16 lkey = key & 0x7FFF;
- int ret;
-
- if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
- /* nothing to do; this key always valid */
- ret = 0;
- goto bail;
- }
-
- ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
- "%hx:%x %hx:%x %hx:%x %hx:%x\n",
- pd->port_port, key, dd->ipath_pkeys[0],
- atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
- atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
- atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
- atomic_read(&dd->ipath_pkeyrefs[3]));
-
- if (!lkey) {
- ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
- pd->port_port);
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Set the full membership bit, because it has to be
- * set in the register or the packet, and it seems
- * cleaner to set in the register than to force all
- * callers to set it. (see bug 4331)
- */
- key |= 0x8000;
-
- for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
- if (!pd->port_pkeys[i] && pidx == -1)
- pidx = i;
- if (pd->port_pkeys[i] == key) {
- ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
- "(%x) more than once\n",
- pd->port_port, key);
- ret = -EEXIST;
- goto bail;
- }
- }
- if (pidx == -1) {
- ipath_dbg("All pkeys for port %u already in use, "
- "can't set %x\n", pd->port_port, key);
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (!dd->ipath_pkeys[i]) {
- any++;
- continue;
- }
- if (dd->ipath_pkeys[i] == key) {
- atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
-
- if (atomic_inc_return(pkrefs) > 1) {
- pd->port_pkeys[pidx] = key;
- ipath_cdbg(VERBOSE, "p%u set key %x "
- "matches #%d, count now %d\n",
- pd->port_port, key, i,
- atomic_read(pkrefs));
- ret = 0;
- goto bail;
- } else {
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- ipath_cdbg(VERBOSE, "Lost race, count was "
- "0, after dec, it's %d\n",
- atomic_read(pkrefs));
- any++;
- }
- }
- if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
- /*
- * It makes no sense to have both the limited and
- * full membership PKEY set at the same time since
- * the unlimited one will disable the limited one.
- */
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ipath_dbg("port %u, all pkeys already in use, "
- "can't set %x\n", pd->port_port, key);
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (!dd->ipath_pkeys[i] &&
- atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
- u64 pkey;
-
- /* for ipathstats, etc. */
- ipath_stats.sps_pkeys[i] = lkey;
- pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
- pkey =
- (u64) dd->ipath_pkeys[0] |
- ((u64) dd->ipath_pkeys[1] << 16) |
- ((u64) dd->ipath_pkeys[2] << 32) |
- ((u64) dd->ipath_pkeys[3] << 48);
- ipath_cdbg(PROC, "p%u set key %x in #%d, "
- "portidx %d, new pkey reg %llx\n",
- pd->port_port, key, i, pidx,
- (unsigned long long) pkey);
- ipath_write_kreg(
- dd, dd->ipath_kregs->kr_partitionkey, pkey);
-
- ret = 0;
- goto bail;
- }
- }
- ipath_dbg("port %u, all pkeys already in use 2nd pass, "
- "can't set %x\n", pd->port_port, key);
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_manage_rcvq - manage a port's receive queue
- * @pd: the port
- * @subport: the subport
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the port, for use in queue
- * overflow conditions. start_stop==1 re-enables, to be used to
- * re-init the software copy of the head register
- */
-static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
- int start_stop)
-{
- struct ipath_devdata *dd = pd->port_dd;
-
- ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
- start_stop ? "en" : "dis", dd->ipath_unit,
- pd->port_port, subport);
- if (subport)
- goto bail;
- /* atomically clear receive enable port. */
- if (start_stop) {
- /*
- * On enable, force in-memory copy of the tail register to
- * 0, so that protocol code doesn't have to worry about
- * whether or not the chip has yet updated the in-memory
- * copy or not on return from the system call. The chip
-		 * always resets its tail register back to 0 on a
- * transition from disabled to enabled. This could cause a
- * problem if software was broken, and did the enable w/o
- * the disable, but eventually the in-memory copy will be
- * updated and correct itself, even in the face of software
- * bugs.
- */
- if (pd->port_rcvhdrtail_kvaddr)
- ipath_clear_rcvhdrtail(pd);
- set_bit(dd->ipath_r_portenable_shift + pd->port_port,
- &dd->ipath_rcvctrl);
- } else
- clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
- &dd->ipath_rcvctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- /* now be sure chip saw it before we return */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- if (start_stop) {
- /*
- * And try to be sure that tail reg update has happened too.
- * This should in theory interlock with the RXE changes to
- * the tail register. Don't assign it to the tail register
- * in memory copy, since we could overwrite an update by the
- * chip if we did.
- */
- ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
- }
- /* always; new head should be equal to new tail; see above */
-bail:
- return 0;
-}
-
-static void ipath_clean_part_key(struct ipath_portdata *pd,
- struct ipath_devdata *dd)
-{
- int i, j, pchanged = 0;
- u64 oldpkey;
-
- /* for debugging only */
- oldpkey = (u64) dd->ipath_pkeys[0] |
- ((u64) dd->ipath_pkeys[1] << 16) |
- ((u64) dd->ipath_pkeys[2] << 32) |
- ((u64) dd->ipath_pkeys[3] << 48);
-
- for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
- if (!pd->port_pkeys[i])
- continue;
- ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
- pd->port_pkeys[i]);
- for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
- /* check for match independent of the global bit */
- if ((dd->ipath_pkeys[j] & 0x7fff) !=
- (pd->port_pkeys[i] & 0x7fff))
- continue;
- if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
- ipath_cdbg(VERBOSE, "p%u clear key "
- "%x matches #%d\n",
- pd->port_port,
- pd->port_pkeys[i], j);
- ipath_stats.sps_pkeys[j] =
- dd->ipath_pkeys[j] = 0;
- pchanged++;
- } else {
- ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
- "but ref still %d\n", pd->port_port,
- pd->port_pkeys[i], j,
- atomic_read(&dd->ipath_pkeyrefs[j]));
- break;
- }
- }
- pd->port_pkeys[i] = 0;
- }
- if (pchanged) {
- u64 pkey = (u64) dd->ipath_pkeys[0] |
- ((u64) dd->ipath_pkeys[1] << 16) |
- ((u64) dd->ipath_pkeys[2] << 32) |
- ((u64) dd->ipath_pkeys[3] << 48);
- ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
- "new pkey reg %llx\n", pd->port_port,
- (unsigned long long) oldpkey,
- (unsigned long long) pkey);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
- pkey);
- }
-}
-
-/*
- * Initialize the port data with the receive buffer sizes
- * so this can be done while the master port is locked.
- * Otherwise, there is a race with a slave opening the port
- * and seeing these fields uninitialized.
- */
-static void init_user_egr_sizes(struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- unsigned egrperchunk, egrcnt, size;
-
- /*
- * to avoid wasting a lot of memory, we allocate 32KB chunks of
- * physically contiguous memory, advance through it until used up
- * and then allocate more. Of course, we need memory to store those
- * extra pointers, now. Started out with 256KB, but under heavy
- * memory pressure (creating large files and then copying them over
- * NFS while doing lots of MPI jobs), we hit some allocation
- * failures, even though we can sleep... (2.6.10) Still get
- * failures at 64K. 32K is the lowest we can go without wasting
- * additional memory.
- */
- size = 0x8000;
- egrperchunk = size / dd->ipath_rcvegrbufsize;
- egrcnt = dd->ipath_rcvegrcnt;
- pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
- pd->port_rcvegrbufs_perchunk = egrperchunk;
- pd->port_rcvegrbuf_size = size;
-}
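To make the chunk arithmetic in init_user_egr_sizes() concrete, a small standalone sketch; the 2048-byte eager buffer size and 64-entry eager count below are illustrative values, not taken from a particular board:

#include <stdio.h>

int main(void)
{
	unsigned size = 0x8000;          /* 32KB chunk, as in init_user_egr_sizes() */
	unsigned egrbufsize = 2048;      /* illustrative eager buffer size */
	unsigned egrcnt = 64;            /* illustrative eager TID count */
	unsigned perchunk = size / egrbufsize;                /* 16 buffers per chunk */
	unsigned chunks = (egrcnt + perchunk - 1) / perchunk; /* rounds up to 4 chunks */

	printf("%u buffers per chunk, %u chunks of %u bytes\n",
	       perchunk, chunks, size);
	return 0;
}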
-
-/**
- * ipath_create_user_egr - allocate eager TID buffers
- * @pd: the port to allocate TID buffers for
- *
- * This routine is now quite different for user and kernel, because
- * the kernel uses skb's, for the accelerated network performance.
- * This is the user port version.
- *
- * Allocate the eager TID buffers and program them into infinipath
- * They are no longer completely contiguous, we do multiple allocation
- * calls.
- */
-static int ipath_create_user_egr(struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
- size_t size;
- int ret;
- gfp_t gfp_flags;
-
- /*
- * GFP_USER, but without GFP_FS, so buffer cache can be
- * coalesced (we hope); otherwise, even at order 4,
- * heavy filesystem activity makes these fail, and we can
- * use compound pages.
- */
- gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
- egrcnt = dd->ipath_rcvegrcnt;
- /* TID number offset for this port */
- egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
- egrsize = dd->ipath_rcvegrbufsize;
- ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
- "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
-
- chunk = pd->port_rcvegrbuf_chunks;
- egrperchunk = pd->port_rcvegrbufs_perchunk;
- size = pd->port_rcvegrbuf_size;
- pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
- GFP_KERNEL);
- if (!pd->port_rcvegrbuf) {
- ret = -ENOMEM;
- goto bail;
- }
- pd->port_rcvegrbuf_phys =
- kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
- GFP_KERNEL);
- if (!pd->port_rcvegrbuf_phys) {
- ret = -ENOMEM;
- goto bail_rcvegrbuf;
- }
- for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-
- pd->port_rcvegrbuf[e] = dma_alloc_coherent(
- &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
- gfp_flags);
-
- if (!pd->port_rcvegrbuf[e]) {
- ret = -ENOMEM;
- goto bail_rcvegrbuf_phys;
- }
- }
-
- pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
-
- for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
- dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
- unsigned i;
-
- for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
- dd->ipath_f_put_tid(dd, e + egroff +
- (u64 __iomem *)
- ((char __iomem *)
- dd->ipath_kregbase +
- dd->ipath_rcvegrbase),
- RCVHQ_RCV_TYPE_EAGER, pa);
- pa += egrsize;
- }
- cond_resched(); /* don't hog the cpu */
- }
-
- ret = 0;
- goto bail;
-
-bail_rcvegrbuf_phys:
- for (e = 0; e < pd->port_rcvegrbuf_chunks &&
- pd->port_rcvegrbuf[e]; e++) {
- dma_free_coherent(&dd->pcidev->dev, size,
- pd->port_rcvegrbuf[e],
- pd->port_rcvegrbuf_phys[e]);
-
- }
- kfree(pd->port_rcvegrbuf_phys);
- pd->port_rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
- kfree(pd->port_rcvegrbuf);
- pd->port_rcvegrbuf = NULL;
-bail:
- return ret;
-}
-
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int ipath_mmap_mem(struct vm_area_struct *vma,
- struct ipath_portdata *pd, unsigned len, int write_ok,
- void *kvaddr, char *what)
-{
- struct ipath_devdata *dd = pd->port_dd;
- unsigned long pfn;
- int ret;
-
- if ((vma->vm_end - vma->vm_start) > len) {
- dev_info(&dd->pcidev->dev,
- "FAIL on %s: len %lx > %x\n", what,
- vma->vm_end - vma->vm_start, len);
- ret = -EFAULT;
- goto bail;
- }
-
- if (!write_ok) {
- if (vma->vm_flags & VM_WRITE) {
- dev_info(&dd->pcidev->dev,
- "%s must be mapped readonly\n", what);
- ret = -EPERM;
- goto bail;
- }
-
- /* don't allow them to later change with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
- }
-
- pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, vma->vm_start, pfn,
- len, vma->vm_page_prot);
- if (ret)
- dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
- "bytes r%c failed: %d\n", what, pd->port_port,
- pfn, len, write_ok?'w':'o', ret);
- else
- ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
- "r%c\n", what, pd->port_port, pfn, len,
- write_ok?'w':'o');
-bail:
- return ret;
-}
-
-static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
- u64 ureg)
-{
- unsigned long phys;
- int ret;
-
- /*
- * This is real hardware, so use io_remap. This is the mechanism
- * for the user process to update the head registers for their port
- * in the chip.
- */
- if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
- dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
- "%lx > PAGE\n", vma->vm_end - vma->vm_start);
- ret = -EFAULT;
- } else {
- phys = dd->ipath_physaddr + ureg;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
- return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
- struct ipath_devdata *dd,
- struct ipath_portdata *pd,
- unsigned piobufs, unsigned piocnt)
-{
- unsigned long phys;
- int ret;
-
- /*
- * When we map the PIO buffers in the chip, we want to map them as
- * writeonly, no read possible. This prevents access to previous
- * process data, and catches users who might try to read the i/o
- * space due to a bug.
- */
- if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
- dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
- "reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EINVAL;
- goto bail;
- }
-
- phys = dd->ipath_physaddr + piobufs;
-
-#if defined(__powerpc__)
- /* There isn't a generic way to specify writethrough mappings */
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
- pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
-#endif
-
- /*
- * don't allow them to later change to readable with mprotect (for when
- * not initially mapped readable, as is normally the case)
- */
- vma->vm_flags &= ~VM_MAYREAD;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-
- ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-bail:
- return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
- struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- unsigned long start, size;
- size_t total_size, i;
- unsigned long pfn;
- int ret;
-
- size = pd->port_rcvegrbuf_size;
- total_size = pd->port_rcvegrbuf_chunks * size;
- if ((vma->vm_end - vma->vm_start) > total_size) {
- dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
- "reqlen %lx > actual %lx\n",
- vma->vm_end - vma->vm_start,
- (unsigned long) total_size);
- ret = -EINVAL;
- goto bail;
- }
-
- if (vma->vm_flags & VM_WRITE) {
- dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
- "writable (flags=%lx)\n", vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /* don't allow them to later change to writeable with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
-
- start = vma->vm_start;
-
- for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
- pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, start, pfn, size,
- vma->vm_page_prot);
- if (ret < 0)
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * ipath_file_vma_fault - handle a VMA page fault.
- */
-static int ipath_file_vma_fault(struct vm_area_struct *vma,
- struct vm_fault *vmf)
-{
- struct page *page;
-
- page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static const struct vm_operations_struct ipath_file_vm_ops = {
- .fault = ipath_file_vma_fault,
-};
-
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
- struct ipath_portdata *pd, unsigned subport)
-{
- unsigned long len;
- struct ipath_devdata *dd;
- void *addr;
- size_t size;
- int ret = 0;
-
- /* If the port is not shared, all addresses should be physical */
- if (!pd->port_subport_cnt)
- goto bail;
-
- dd = pd->port_dd;
- size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-
- /*
- * Each process has all the subport uregbase, rcvhdrq, and
- * rcvegrbufs mmapped - as an array for all the processes,
- * and also separately for this process.
- */
- if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
- addr = pd->subport_uregbase;
- size = PAGE_SIZE * pd->port_subport_cnt;
- } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
- addr = pd->subport_rcvhdr_base;
- size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
- } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
- addr = pd->subport_rcvegrbuf;
- size *= pd->port_subport_cnt;
- } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
- PAGE_SIZE * subport)) {
- addr = pd->subport_uregbase + PAGE_SIZE * subport;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
- pd->port_rcvhdrq_size * subport)) {
- addr = pd->subport_rcvhdr_base +
- pd->port_rcvhdrq_size * subport;
- size = pd->port_rcvhdrq_size;
- } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
- size * subport)) {
- addr = pd->subport_rcvegrbuf + size * subport;
- /* rcvegrbufs are read-only on the slave */
- if (vma->vm_flags & VM_WRITE) {
- dev_info(&dd->pcidev->dev,
- "Can't map eager buffers as "
- "writable (flags=%lx)\n", vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /*
- * Don't allow permission to later change to writeable
- * with mprotect.
- */
- vma->vm_flags &= ~VM_MAYWRITE;
- } else {
- goto bail;
- }
- len = vma->vm_end - vma->vm_start;
- if (len > size) {
- ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
- ret = -EINVAL;
- goto bail;
- }
-
- vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
- vma->vm_ops = &ipath_file_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_mmap - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
- * buffers in the chip. We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
-{
- struct ipath_portdata *pd;
- struct ipath_devdata *dd;
- u64 pgaddr, ureg;
- unsigned piobufs, piocnt;
- int ret;
-
- pd = port_fp(fp);
- if (!pd) {
- ret = -EINVAL;
- goto bail;
- }
- dd = pd->port_dd;
-
- /*
- * This is the ipath_do_user_init() code, mapping the shared buffers
- * into the user process. The address referred to by vm_pgoff is the
- * file offset passed via mmap(). For shared ports, this is the
- * kernel vmalloc() address of the pages to share with the master.
- * For non-shared or master ports, this is a physical address.
- * We only do one mmap for each space mapped.
- */
- pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
- /*
- * Check for 0 in case one of the allocations failed, but user
- * called mmap anyway.
- */
- if (!pgaddr) {
- ret = -EINVAL;
- goto bail;
- }
-
- ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
- (unsigned long long) pgaddr, vma->vm_start,
- vma->vm_end - vma->vm_start, dd->ipath_unit,
- pd->port_port, subport_fp(fp));
-
- /*
- * Physical addresses must fit in 40 bits for our hardware.
- * Check for kernel virtual addresses first, anything else must
- * match a HW or memory address.
- */
- ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
- if (ret) {
- if (ret > 0)
- ret = 0;
- goto bail;
- }
-
- ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
- if (!pd->port_subport_cnt) {
- /* port is not shared */
- piocnt = pd->port_piocnt;
- piobufs = pd->port_piobufs;
- } else if (!subport_fp(fp)) {
- /* caller is the master */
- piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
- (pd->port_piocnt % pd->port_subport_cnt);
- piobufs = pd->port_piobufs +
- dd->ipath_palign * (pd->port_piocnt - piocnt);
- } else {
- unsigned slave = subport_fp(fp) - 1;
-
- /* caller is a slave */
- piocnt = pd->port_piocnt / pd->port_subport_cnt;
- piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
- }
-
- if (pgaddr == ureg)
- ret = mmap_ureg(vma, dd, ureg);
- else if (pgaddr == piobufs)
- ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
- else if (pgaddr == dd->ipath_pioavailregs_phys)
- /* in-memory copy of pioavail registers */
- ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
- (void *) dd->ipath_pioavailregs_dma,
- "pioavail registers");
- else if (pgaddr == pd->port_rcvegr_phys)
- ret = mmap_rcvegrbufs(vma, pd);
- else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
- /*
- * The rcvhdrq itself; readonly except on HT (so have
- * to allow writable mapping), multiple pages, contiguous
- * from an i/o perspective.
- */
- ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
- pd->port_rcvhdrq,
- "rcvhdrq");
- else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
- /* in-memory copy of rcvhdrq tail register */
- ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
- pd->port_rcvhdrtail_kvaddr,
- "rcvhdrq tail");
- else
- ret = -EINVAL;
-
- vma->vm_private_data = NULL;
-
- if (ret < 0)
- dev_info(&dd->pcidev->dev,
- "Failure %d on off %llx len %lx\n",
- -ret, (unsigned long long)pgaddr,
- vma->vm_end - vma->vm_start);
-bail:
- return ret;
-}
-
-static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
-{
- unsigned pollflag = 0;
-
- if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
- pd->port_hdrqfull != pd->port_hdrqfull_poll) {
- pollflag |= POLLIN | POLLRDNORM;
- pd->port_hdrqfull_poll = pd->port_hdrqfull;
- }
-
- return pollflag;
-}
-
-static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- unsigned pollflag = 0;
- struct ipath_devdata *dd;
-
- dd = pd->port_dd;
-
- /* variable access in ipath_poll_hdrqfull() needs this */
- rmb();
- pollflag = ipath_poll_hdrqfull(pd);
-
- if (pd->port_urgent != pd->port_urgent_poll) {
- pollflag |= POLLIN | POLLRDNORM;
- pd->port_urgent_poll = pd->port_urgent;
- }
-
- if (!pollflag) {
- /* this saves a spin_lock/unlock in interrupt handler... */
- set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
- /* flush waiting flag so don't miss an event... */
- wmb();
- poll_wait(fp, &pd->port_wait, pt);
- }
-
- return pollflag;
-}
-
-static unsigned int ipath_poll_next(struct ipath_portdata *pd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- u32 head;
- u32 tail;
- unsigned pollflag = 0;
- struct ipath_devdata *dd;
-
- dd = pd->port_dd;
-
- /* variable access in ipath_poll_hdrqfull() needs this */
- rmb();
- pollflag = ipath_poll_hdrqfull(pd);
-
- head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
- if (pd->port_rcvhdrtail_kvaddr)
- tail = ipath_get_rcvhdrtail(pd);
- else
- tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-
- if (head != tail)
- pollflag |= POLLIN | POLLRDNORM;
- else {
- /* this saves a spin_lock/unlock in interrupt handler */
- set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
- /* flush waiting flag so we don't miss an event */
- wmb();
-
- set_bit(pd->port_port + dd->ipath_r_intravail_shift,
- &dd->ipath_rcvctrl);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
-
- if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
- ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off | head,
- pd->port_port);
-
- poll_wait(fp, &pd->port_wait, pt);
- }
-
- return pollflag;
-}
-
-static unsigned int ipath_poll(struct file *fp,
- struct poll_table_struct *pt)
-{
- struct ipath_portdata *pd;
- unsigned pollflag;
-
- pd = port_fp(fp);
- if (!pd)
- pollflag = 0;
- else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
- pollflag = ipath_poll_urgent(pd, fp, pt);
- else
- pollflag = ipath_poll_next(pd, fp, pt);
-
- return pollflag;
-}
-
-static int ipath_supports_subports(int user_swmajor, int user_swminor)
-{
- /* no subport implementation prior to software version 1.3 */
- return (user_swmajor > 1) || (user_swminor >= 3);
-}
-
-static int ipath_compatible_subports(int user_swmajor, int user_swminor)
-{
- /* this code is written long-hand for clarity */
- if (IPATH_USER_SWMAJOR != user_swmajor) {
- /* no promise of compatibility if major mismatch */
- return 0;
- }
- if (IPATH_USER_SWMAJOR == 1) {
- switch (IPATH_USER_SWMINOR) {
- case 0:
- case 1:
- case 2:
- /* no subport implementation so cannot be compatible */
- return 0;
- case 3:
- /* 3 is only compatible with itself */
- return user_swminor == 3;
- default:
- /* >= 4 are compatible (or are expected to be) */
- return user_swminor >= 4;
- }
- }
- /* make no promises yet for future major versions */
- return 0;
-}
-
-static int init_subports(struct ipath_devdata *dd,
- struct ipath_portdata *pd,
- const struct ipath_user_info *uinfo)
-{
- int ret = 0;
- unsigned num_subports;
- size_t size;
-
- /*
- * If the user is requesting zero subports,
- * skip the subport allocation.
- */
- if (uinfo->spu_subport_cnt <= 0)
- goto bail;
-
- /* Self-consistency check for ipath_compatible_subports() */
- if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
- !ipath_compatible_subports(IPATH_USER_SWMAJOR,
- IPATH_USER_SWMINOR)) {
- dev_info(&dd->pcidev->dev,
- "Inconsistent ipath_compatible_subports()\n");
- goto bail;
- }
-
- /* Check for subport compatibility */
- if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
- uinfo->spu_userversion & 0xffff)) {
- dev_info(&dd->pcidev->dev,
- "Mismatched user version (%d.%d) and driver "
- "version (%d.%d) while port sharing. Ensure "
- "that driver and library are from the same "
- "release.\n",
- (int) (uinfo->spu_userversion >> 16),
- (int) (uinfo->spu_userversion & 0xffff),
- IPATH_USER_SWMAJOR,
- IPATH_USER_SWMINOR);
- goto bail;
- }
- if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
- ret = -EINVAL;
- goto bail;
- }
-
- num_subports = uinfo->spu_subport_cnt;
- pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
- if (!pd->subport_uregbase) {
- ret = -ENOMEM;
- goto bail;
- }
- /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
- size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
- sizeof(u32), PAGE_SIZE) * num_subports;
- pd->subport_rcvhdr_base = vzalloc(size);
- if (!pd->subport_rcvhdr_base) {
- ret = -ENOMEM;
- goto bail_ureg;
- }
-
- pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
- pd->port_rcvegrbuf_size *
- num_subports);
- if (!pd->subport_rcvegrbuf) {
- ret = -ENOMEM;
- goto bail_rhdr;
- }
-
- pd->port_subport_cnt = uinfo->spu_subport_cnt;
- pd->port_subport_id = uinfo->spu_subport_id;
- pd->active_slaves = 1;
- set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
- goto bail;
-
-bail_rhdr:
- vfree(pd->subport_rcvhdr_base);
-bail_ureg:
- vfree(pd->subport_uregbase);
- pd->subport_uregbase = NULL;
-bail:
- return ret;
-}
-
-static int try_alloc_port(struct ipath_devdata *dd, int port,
- struct file *fp,
- const struct ipath_user_info *uinfo)
-{
- struct ipath_portdata *pd;
- int ret;
-
- if (!(pd = dd->ipath_pd[port])) {
- void *ptmp;
-
- pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
-
- /*
- * Allocate memory for use in ipath_tid_update() just once
- * at open, not per call. Reduces cost of expected send
- * setup.
- */
- ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
- dd->ipath_rcvtidcnt * sizeof(struct page **),
- GFP_KERNEL);
- if (!pd || !ptmp) {
- ipath_dev_err(dd, "Unable to allocate portdata "
- "memory, failing open\n");
- ret = -ENOMEM;
- kfree(pd);
- kfree(ptmp);
- goto bail;
- }
- dd->ipath_pd[port] = pd;
- dd->ipath_pd[port]->port_port = port;
- dd->ipath_pd[port]->port_dd = dd;
- dd->ipath_pd[port]->port_tid_pg_list = ptmp;
- init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
- }
- if (!pd->port_cnt) {
- pd->userversion = uinfo->spu_userversion;
- init_user_egr_sizes(pd);
- if ((ret = init_subports(dd, pd, uinfo)) != 0)
- goto bail;
- ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
- current->comm, current->pid, dd->ipath_unit,
- port);
- pd->port_cnt = 1;
- port_fp(fp) = pd;
- pd->port_pid = get_pid(task_pid(current));
- strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
- ipath_stats.sps_ports++;
- ret = 0;
- } else
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
-static inline int usable(struct ipath_devdata *dd)
-{
- return dd &&
- (dd->ipath_flags & IPATH_PRESENT) &&
- dd->ipath_kregbase &&
- dd->ipath_lid &&
- !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
- | IPATH_LINKUNK));
-}
-
-static int find_free_port(int unit, struct file *fp,
- const struct ipath_user_info *uinfo)
-{
- struct ipath_devdata *dd = ipath_lookup(unit);
- int ret, i;
-
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- if (!usable(dd)) {
- ret = -ENETDOWN;
- goto bail;
- }
-
- for (i = 1; i < dd->ipath_cfgports; i++) {
- ret = try_alloc_port(dd, i, fp, uinfo);
- if (ret != -EBUSY)
- goto bail;
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
-static int find_best_unit(struct file *fp,
- const struct ipath_user_info *uinfo)
-{
- int ret = 0, i, prefunit = -1, devmax;
- int maxofallports, npresent, nup;
- int ndev;
-
- devmax = ipath_count_units(&npresent, &nup, &maxofallports);
-
- /*
- * This code is present to allow a knowledgeable person to
- * specify the layout of processes to processors before opening
- * this driver, and then we'll assign the process to the "closest"
- * InfiniPath chip to that processor (we assume reasonable connectivity,
- * for now). This code assumes that if affinity has been set
- * before this point, that at most one cpu is set; for now this
- * is reasonable. I check for both cpumask_empty() and cpumask_full(),
- * in case some kernel variant sets none of the bits when no
- * affinity is set. 2.6.11 and 12 kernels have all present
- * cpus set. Some day we'll have to fix it up further to handle
- * a cpu subset. This algorithm fails for two HT chips connected
- * in tunnel fashion. Eventually this needs real topology
- * information. There may be some issues with dual core numbering
- * as well. This needs more work prior to release.
- */
- if (!cpumask_empty(tsk_cpus_allowed(current)) &&
- !cpumask_full(tsk_cpus_allowed(current))) {
- int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
- get_online_cpus();
- for_each_online_cpu(i)
- if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
- ipath_cdbg(PROC, "%s[%u] affinity set for "
- "cpu %d/%d\n", current->comm,
- current->pid, i, ncpus);
- curcpu = i;
- nset++;
- }
- put_online_cpus();
- if (curcpu != -1 && nset != ncpus) {
- if (npresent) {
- prefunit = curcpu / (ncpus / npresent);
- ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
- "%d cpus/chip, select unit %d\n",
- current->comm, current->pid,
- npresent, ncpus, ncpus / npresent,
- prefunit);
- }
- }
- }
-
- /*
- * user ports start at 1, kernel port is 0
- * For now, we do round-robin access across all chips
- */
-
- if (prefunit != -1)
- devmax = prefunit + 1;
-recheck:
- for (i = 1; i < maxofallports; i++) {
- for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
- ndev++) {
- struct ipath_devdata *dd = ipath_lookup(ndev);
-
- if (!usable(dd))
- continue; /* can't use this unit */
- if (i >= dd->ipath_cfgports)
- /*
- * Maxed out on users of this unit. Try
- * next.
- */
- continue;
- ret = try_alloc_port(dd, i, fp, uinfo);
- if (!ret)
- goto done;
- }
- }
-
- if (npresent) {
- if (nup == 0) {
- ret = -ENETDOWN;
- ipath_dbg("No ports available (none initialized "
- "and ready)\n");
- } else {
- if (prefunit > 0) {
- /* if started above 0, retry from 0 */
- ipath_cdbg(PROC,
- "%s[%u] no ports on prefunit "
- "%d, clear and re-check\n",
- current->comm, current->pid,
- prefunit);
- devmax = ipath_count_units(NULL, NULL,
- NULL);
- prefunit = -1;
- goto recheck;
- }
- ret = -EBUSY;
- ipath_dbg("No ports available\n");
- }
- } else {
- ret = -ENXIO;
- ipath_dbg("No boards found\n");
- }
-
-done:
- return ret;
-}
-
-static int find_shared_port(struct file *fp,
- const struct ipath_user_info *uinfo)
-{
- int devmax, ndev, i;
- int ret = 0;
-
- devmax = ipath_count_units(NULL, NULL, NULL);
-
- for (ndev = 0; ndev < devmax; ndev++) {
- struct ipath_devdata *dd = ipath_lookup(ndev);
-
- if (!usable(dd))
- continue;
- for (i = 1; i < dd->ipath_cfgports; i++) {
- struct ipath_portdata *pd = dd->ipath_pd[i];
-
- /* Skip ports which are not yet open */
- if (!pd || !pd->port_cnt)
- continue;
- /* Skip port if it doesn't match the requested one */
- if (pd->port_subport_id != uinfo->spu_subport_id)
- continue;
- /* Verify the sharing process matches the master */
- if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
- pd->userversion != uinfo->spu_userversion ||
- pd->port_cnt >= pd->port_subport_cnt) {
- ret = -EINVAL;
- goto done;
- }
- port_fp(fp) = pd;
- subport_fp(fp) = pd->port_cnt++;
- pd->port_subpid[subport_fp(fp)] =
- get_pid(task_pid(current));
- tidcursor_fp(fp) = 0;
- pd->active_slaves |= 1 << subport_fp(fp);
- ipath_cdbg(PROC,
- "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
- current->comm, current->pid,
- subport_fp(fp),
- pd->port_comm, pid_nr(pd->port_pid),
- dd->ipath_unit, pd->port_port);
- ret = 1;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-static int ipath_open(struct inode *in, struct file *fp)
-{
- /* The real work is performed later in ipath_assign_port() */
- fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
- return fp->private_data ? 0 : -ENOMEM;
-}
-
-/* Get port early, so can set affinity prior to memory allocation */
-static int ipath_assign_port(struct file *fp,
- const struct ipath_user_info *uinfo)
-{
- int ret;
- int i_minor;
- unsigned swmajor, swminor;
-
- /* Check to be sure we haven't already initialized this file */
- if (port_fp(fp)) {
- ret = -EINVAL;
- goto done;
- }
-
- /* for now, if major version is different, bail */
- swmajor = uinfo->spu_userversion >> 16;
- if (swmajor != IPATH_USER_SWMAJOR) {
- ipath_dbg("User major version %d not same as driver "
- "major %d\n", uinfo->spu_userversion >> 16,
- IPATH_USER_SWMAJOR);
- ret = -ENODEV;
- goto done;
- }
-
- swminor = uinfo->spu_userversion & 0xffff;
- if (swminor != IPATH_USER_SWMINOR)
- ipath_dbg("User minor version %d not same as driver "
- "minor %d\n", swminor, IPATH_USER_SWMINOR);
-
- mutex_lock(&ipath_mutex);
-
- if (ipath_compatible_subports(swmajor, swminor) &&
- uinfo->spu_subport_cnt &&
- (ret = find_shared_port(fp, uinfo))) {
- if (ret > 0)
- ret = 0;
- goto done_chk_sdma;
- }
-
- i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
- ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
- (long)file_inode(fp)->i_rdev, i_minor);
-
- if (i_minor)
- ret = find_free_port(i_minor - 1, fp, uinfo);
- else
- ret = find_best_unit(fp, uinfo);
-
-done_chk_sdma:
- if (!ret) {
- struct ipath_filedata *fd = fp->private_data;
- const struct ipath_portdata *pd = fd->pd;
- const struct ipath_devdata *dd = pd->port_dd;
-
- fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
- dd->ipath_unit,
- pd->port_port,
- fd->subport);
-
- if (!fd->pq)
- ret = -ENOMEM;
- }
-
- mutex_unlock(&ipath_mutex);
-
-done:
- return ret;
-}
-
-
-static int ipath_do_user_init(struct file *fp,
- const struct ipath_user_info *uinfo)
-{
- int ret;
- struct ipath_portdata *pd = port_fp(fp);
- struct ipath_devdata *dd;
- u32 head32;
-
- /* Subports don't need to initialize anything since master did it. */
- if (subport_fp(fp)) {
- ret = wait_event_interruptible(pd->port_wait,
- !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
- goto done;
- }
-
- dd = pd->port_dd;
-
- if (uinfo->spu_rcvhdrsize) {
- ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
- if (ret)
- goto done;
- }
-
- /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
-
- /* some ports may get extra buffers, calculate that here */
- if (pd->port_port <= dd->ipath_ports_extrabuf)
- pd->port_piocnt = dd->ipath_pbufsport + 1;
- else
- pd->port_piocnt = dd->ipath_pbufsport;
-
- /* for right now, kernel piobufs are at end, so port 1 is at 0 */
- if (pd->port_port <= dd->ipath_ports_extrabuf)
- pd->port_pio_base = (dd->ipath_pbufsport + 1)
- * (pd->port_port - 1);
- else
- pd->port_pio_base = dd->ipath_ports_extrabuf +
- dd->ipath_pbufsport * (pd->port_port - 1);
- pd->port_piobufs = dd->ipath_piobufbase +
- pd->port_pio_base * dd->ipath_palign;
- ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
- " first pio %u\n", pd->port_port, pd->port_piobufs,
- pd->port_piocnt, pd->port_pio_base);
- ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
-
- /*
- * Now allocate the rcvhdr Q and eager TIDs; skip the TID
-	 * array for the time being. If pd->port_port exceeds what the
-	 * chip supports, we will someday need extra handling here,
-	 * routing the overflow through port 0.
- */
- ret = ipath_create_rcvhdrq(dd, pd);
- if (!ret)
- ret = ipath_create_user_egr(pd);
- if (ret)
- goto done;
-
- /*
- * set the eager head register for this port to the current values
- * of the tail pointers, since we don't know if they were
- * updated on last use of the port.
- */
- head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
- ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
- pd->port_lastrcvhdrqtail = -1;
- ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
- pd->port_port, head32);
- pd->port_tidcursor = 0; /* start at beginning after open */
-
- /* initialize poll variables... */
- pd->port_urgent = 0;
- pd->port_urgent_poll = 0;
- pd->port_hdrqfull_poll = pd->port_hdrqfull;
-
- /*
- * Now enable the port for receive.
-	 * For chips that are set to DMA the tail register to memory
-	 * when it changes (and when the update bit transitions from
-	 * 0 to 1), we turn tail updates off and then back on.
- * This will (very briefly) affect any other open ports, but the
- * duration is very short, and therefore isn't an issue. We
- * explicitly set the in-memory tail copy to 0 beforehand, so we
- * don't have to wait to be sure the DMA update has happened
- * (chip resets head/tail to 0 on transition to enable).
- */
- set_bit(dd->ipath_r_portenable_shift + pd->port_port,
- &dd->ipath_rcvctrl);
- if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
- if (pd->port_rcvhdrtail_kvaddr)
- ipath_clear_rcvhdrtail(pd);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl &
- ~(1ULL << dd->ipath_r_tailupd_shift));
- }
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- /* Notify any waiting slaves */
- if (pd->port_subport_cnt) {
- clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
- wake_up(&pd->port_wait);
- }
-done:
- return ret;
-}
-
-/**
- * unlock_expected_tids - unlock any expected TID entries the port still had in use
- * @pd: port
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using ipath_f_clear_tids.
- */
-static void unlock_expected_tids(struct ipath_portdata *pd)
-{
- struct ipath_devdata *dd = pd->port_dd;
- int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
- int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-
- ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
- pd->port_port);
- for (i = port_tidbase; i < maxtid; i++) {
- struct page *ps = dd->ipath_pageshadow[i];
-
- if (!ps)
- continue;
-
- dd->ipath_pageshadow[i] = NULL;
- pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- ipath_release_user_pages_on_close(&ps, 1);
- cnt++;
- ipath_stats.sps_pageunlocks++;
- }
- if (cnt)
- ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
- pd->port_port, cnt);
-
- if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
- ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
- (unsigned long long) ipath_stats.sps_pagelocks,
- (unsigned long long)
- ipath_stats.sps_pageunlocks);
-}
-
-static int ipath_close(struct inode *in, struct file *fp)
-{
- struct ipath_filedata *fd;
- struct ipath_portdata *pd;
- struct ipath_devdata *dd;
- unsigned long flags;
- unsigned port;
- struct pid *pid;
-
- ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
- (long)in->i_rdev, fp->private_data);
-
- mutex_lock(&ipath_mutex);
-
- fd = fp->private_data;
- fp->private_data = NULL;
- pd = fd->pd;
- if (!pd) {
- mutex_unlock(&ipath_mutex);
- goto bail;
- }
-
- dd = pd->port_dd;
-
- /* drain user sdma queue */
- ipath_user_sdma_queue_drain(dd, fd->pq);
- ipath_user_sdma_queue_destroy(fd->pq);
-
- if (--pd->port_cnt) {
- /*
- * XXX If the master closes the port before the slave(s),
- * revoke the mmap for the eager receive queue so
- * the slave(s) don't wait for receive data forever.
- */
- pd->active_slaves &= ~(1 << fd->subport);
- put_pid(pd->port_subpid[fd->subport]);
- pd->port_subpid[fd->subport] = NULL;
- mutex_unlock(&ipath_mutex);
- goto bail;
- }
- /* early; no interrupt users after this */
- spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
- port = pd->port_port;
- dd->ipath_pd[port] = NULL;
- pid = pd->port_pid;
- pd->port_pid = NULL;
- spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
- if (pd->port_rcvwait_to || pd->port_piowait_to
- || pd->port_rcvnowait || pd->port_pionowait) {
- ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
- "%u rcv %u, pio already\n",
- pd->port_port, pd->port_rcvwait_to,
- pd->port_piowait_to, pd->port_rcvnowait,
- pd->port_pionowait);
- pd->port_rcvwait_to = pd->port_piowait_to =
- pd->port_rcvnowait = pd->port_pionowait = 0;
- }
- if (pd->port_flag) {
- ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
- pd->port_port, pd->port_flag);
- pd->port_flag = 0;
- }
-
- if (dd->ipath_kregbase) {
- /* atomically clear receive enable port and intr avail. */
- clear_bit(dd->ipath_r_portenable_shift + port,
- &dd->ipath_rcvctrl);
- clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
- &dd->ipath_rcvctrl);
- ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- /* and read back from chip to be sure that nothing
- * else is in flight when we do the rest */
- (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
- /* clean up the pkeys for this port user */
- ipath_clean_part_key(pd, dd);
- /*
- * be paranoid, and never write 0's to these, just use an
- * unused part of the port 0 tail page. Of course,
- * rcvhdraddr points to a large chunk of memory, so this
- * could still trash things, but at least it won't trash
- * page 0, and by disabling the port, it should stop "soon",
-		 * even if a packet or two is already in flight after we
- * disabled the port.
- */
- ipath_write_kreg_port(dd,
- dd->ipath_kregs->kr_rcvhdrtailaddr, port,
- dd->ipath_dummy_hdrq_phys);
- ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
- pd->port_port, dd->ipath_dummy_hdrq_phys);
-
- ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
- ipath_chg_pioavailkernel(dd, pd->port_pio_base,
- pd->port_piocnt, 1);
-
- dd->ipath_f_clear_tids(dd, pd->port_port);
-
- if (dd->ipath_pageshadow)
- unlock_expected_tids(pd);
- ipath_stats.sps_ports--;
- ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
- pd->port_comm, pid_nr(pid),
- dd->ipath_unit, port);
- }
-
- put_pid(pid);
- mutex_unlock(&ipath_mutex);
- ipath_free_pddata(dd, pd); /* after releasing the mutex */
-
-bail:
- kfree(fd);
- return 0;
-}
-
-static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
- struct ipath_port_info __user *uinfo)
-{
- struct ipath_port_info info;
- int nup;
- int ret;
- size_t sz;
-
- (void) ipath_count_units(NULL, &nup, NULL);
- info.num_active = nup;
- info.unit = pd->port_dd->ipath_unit;
- info.port = pd->port_port;
- info.subport = subport;
- /* Don't return new fields if old library opened the port. */
- if (ipath_supports_subports(pd->userversion >> 16,
- pd->userversion & 0xffff)) {
- /* Number of user ports available for this device. */
- info.num_ports = pd->port_dd->ipath_cfgports - 1;
- info.num_subports = pd->port_subport_cnt;
- sz = sizeof(info);
- } else
- sz = sizeof(info) - 2 * sizeof(u16);
-
- if (copy_to_user(uinfo, &info, sz)) {
- ret = -EFAULT;
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int ipath_get_slave_info(struct ipath_portdata *pd,
- void __user *slave_mask_addr)
-{
- int ret = 0;
-
- if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
- ret = -EFAULT;
- return ret;
-}
-
-static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
- u32 __user *inflightp)
-{
- const u32 val = ipath_user_sdma_inflight_counter(pq);
-
- if (put_user(val, inflightp))
- return -EFAULT;
-
- return 0;
-}
-
-static int ipath_sdma_get_complete(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- u32 __user *completep)
-{
- u32 val;
- int err;
-
- err = ipath_user_sdma_make_progress(dd, pq);
- if (err < 0)
- return err;
-
- val = ipath_user_sdma_complete_counter(pq);
- if (put_user(val, completep))
- return -EFAULT;
-
- return 0;
-}
-
-static ssize_t ipath_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- const struct ipath_cmd __user *ucmd;
- struct ipath_portdata *pd;
- const void __user *src;
- size_t consumed, copy;
- struct ipath_cmd cmd;
- ssize_t ret = 0;
- void *dest;
-
- if (count < sizeof(cmd.type)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct ipath_cmd __user *) data;
-
- if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd.type);
-
- switch (cmd.type) {
- case IPATH_CMD_ASSIGN_PORT:
- case __IPATH_CMD_USER_INIT:
- case IPATH_CMD_USER_INIT:
- copy = sizeof(cmd.cmd.user_info);
- dest = &cmd.cmd.user_info;
- src = &ucmd->cmd.user_info;
- break;
- case IPATH_CMD_RECV_CTRL:
- copy = sizeof(cmd.cmd.recv_ctrl);
- dest = &cmd.cmd.recv_ctrl;
- src = &ucmd->cmd.recv_ctrl;
- break;
- case IPATH_CMD_PORT_INFO:
- copy = sizeof(cmd.cmd.port_info);
- dest = &cmd.cmd.port_info;
- src = &ucmd->cmd.port_info;
- break;
- case IPATH_CMD_TID_UPDATE:
- case IPATH_CMD_TID_FREE:
- copy = sizeof(cmd.cmd.tid_info);
- dest = &cmd.cmd.tid_info;
- src = &ucmd->cmd.tid_info;
- break;
- case IPATH_CMD_SET_PART_KEY:
- copy = sizeof(cmd.cmd.part_key);
- dest = &cmd.cmd.part_key;
- src = &ucmd->cmd.part_key;
- break;
- case __IPATH_CMD_SLAVE_INFO:
- copy = sizeof(cmd.cmd.slave_mask_addr);
- dest = &cmd.cmd.slave_mask_addr;
- src = &ucmd->cmd.slave_mask_addr;
- break;
- case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg
- copy = 0;
- src = NULL;
- dest = NULL;
- break;
- case IPATH_CMD_POLL_TYPE:
- copy = sizeof(cmd.cmd.poll_type);
- dest = &cmd.cmd.poll_type;
- src = &ucmd->cmd.poll_type;
- break;
- case IPATH_CMD_ARMLAUNCH_CTRL:
- copy = sizeof(cmd.cmd.armlaunch_ctrl);
- dest = &cmd.cmd.armlaunch_ctrl;
- src = &ucmd->cmd.armlaunch_ctrl;
- break;
- case IPATH_CMD_SDMA_INFLIGHT:
- copy = sizeof(cmd.cmd.sdma_inflight);
- dest = &cmd.cmd.sdma_inflight;
- src = &ucmd->cmd.sdma_inflight;
- break;
- case IPATH_CMD_SDMA_COMPLETE:
- copy = sizeof(cmd.cmd.sdma_complete);
- dest = &cmd.cmd.sdma_complete;
- src = &ucmd->cmd.sdma_complete;
- break;
- default:
- ret = -EINVAL;
- goto bail;
- }
-
- if (copy) {
- if ((count - consumed) < copy) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (copy_from_user(dest, src, copy)) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed += copy;
- }
-
- pd = port_fp(fp);
- if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
- cmd.type != IPATH_CMD_ASSIGN_PORT) {
- ret = -EINVAL;
- goto bail;
- }
-
- switch (cmd.type) {
- case IPATH_CMD_ASSIGN_PORT:
- ret = ipath_assign_port(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- break;
- case __IPATH_CMD_USER_INIT:
- /* backwards compatibility, get port first */
- ret = ipath_assign_port(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- /* and fall through to current version. */
- case IPATH_CMD_USER_INIT:
- ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- ret = ipath_get_base_info(
- fp, (void __user *) (unsigned long)
- cmd.cmd.user_info.spu_base_info,
- cmd.cmd.user_info.spu_base_info_size);
- break;
- case IPATH_CMD_RECV_CTRL:
- ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
- break;
- case IPATH_CMD_PORT_INFO:
- ret = ipath_port_info(pd, subport_fp(fp),
- (struct ipath_port_info __user *)
- (unsigned long) cmd.cmd.port_info);
- break;
- case IPATH_CMD_TID_UPDATE:
- ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
- break;
- case IPATH_CMD_TID_FREE:
- ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
- break;
- case IPATH_CMD_SET_PART_KEY:
- ret = ipath_set_part_key(pd, cmd.cmd.part_key);
- break;
- case __IPATH_CMD_SLAVE_INFO:
- ret = ipath_get_slave_info(pd,
- (void __user *) (unsigned long)
- cmd.cmd.slave_mask_addr);
- break;
- case IPATH_CMD_PIOAVAILUPD:
- ipath_force_pio_avail_update(pd->port_dd);
- break;
- case IPATH_CMD_POLL_TYPE:
- pd->poll_type = cmd.cmd.poll_type;
- break;
- case IPATH_CMD_ARMLAUNCH_CTRL:
- if (cmd.cmd.armlaunch_ctrl)
- ipath_enable_armlaunch(pd->port_dd);
- else
- ipath_disable_armlaunch(pd->port_dd);
- break;
- case IPATH_CMD_SDMA_INFLIGHT:
- ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_inflight);
- break;
- case IPATH_CMD_SDMA_COMPLETE:
- ret = ipath_sdma_get_complete(pd->port_dd,
- user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_complete);
- break;
- }
-
- if (ret >= 0)
- ret = consumed;
-
-bail:
- return ret;
-}
-
-static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *filp = iocb->ki_filp;
- struct ipath_filedata *fp = filp->private_data;
- struct ipath_portdata *pd = port_fp(filp);
- struct ipath_user_sdma_queue *pq = fp->pq;
-
- if (!iter_is_iovec(from) || !from->nr_segs)
- return -EINVAL;
-
- return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
-}
-
-static struct class *ipath_class;
-
-static int init_cdev(int minor, char *name, const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp)
-{
- const dev_t dev = MKDEV(IPATH_MAJOR, minor);
- struct cdev *cdev = NULL;
- struct device *device = NULL;
- int ret;
-
- cdev = cdev_alloc();
- if (!cdev) {
- printk(KERN_ERR IPATH_DRV_NAME
- ": Could not allocate cdev for minor %d, %s\n",
- minor, name);
- ret = -ENOMEM;
- goto done;
- }
-
- cdev->owner = THIS_MODULE;
- cdev->ops = fops;
- kobject_set_name(&cdev->kobj, name);
-
- ret = cdev_add(cdev, dev, 1);
- if (ret < 0) {
- printk(KERN_ERR IPATH_DRV_NAME
- ": Could not add cdev for minor %d, %s (err %d)\n",
- minor, name, -ret);
- goto err_cdev;
- }
-
- device = device_create(ipath_class, NULL, dev, NULL, name);
-
- if (IS_ERR(device)) {
- ret = PTR_ERR(device);
- printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
- "device for minor %d, %s (err %d)\n",
- minor, name, -ret);
- goto err_cdev;
- }
-
- goto done;
-
-err_cdev:
- cdev_del(cdev);
- cdev = NULL;
-
-done:
- if (ret >= 0) {
- *cdevp = cdev;
- *devp = device;
- } else {
- *cdevp = NULL;
- *devp = NULL;
- }
-
- return ret;
-}
-
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp)
-{
- return init_cdev(minor, name, fops, cdevp, devp);
-}
-
-static void cleanup_cdev(struct cdev **cdevp,
- struct device **devp)
-{
- struct device *dev = *devp;
-
- if (dev) {
- device_unregister(dev);
- *devp = NULL;
- }
-
- if (*cdevp) {
- cdev_del(*cdevp);
- *cdevp = NULL;
- }
-}
-
-void ipath_cdev_cleanup(struct cdev **cdevp,
- struct device **devp)
-{
- cleanup_cdev(cdevp, devp);
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_dev;
-
-static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
-
-static int user_init(void)
-{
- int ret;
-
- ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
- if (ret < 0) {
- printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
- "chrdev region (err %d)\n", -ret);
- goto done;
- }
-
- ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
-
- if (IS_ERR(ipath_class)) {
- ret = PTR_ERR(ipath_class);
- printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
- "device class (err %d)\n", -ret);
- goto bail;
- }
-
- goto done;
-bail:
- unregister_chrdev_region(dev, IPATH_NMINORS);
-done:
- return ret;
-}
-
-static void user_cleanup(void)
-{
- if (ipath_class) {
- class_destroy(ipath_class);
- ipath_class = NULL;
- }
-
- unregister_chrdev_region(dev, IPATH_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-static atomic_t user_setup = ATOMIC_INIT(0);
-
-int ipath_user_add(struct ipath_devdata *dd)
-{
- char name[10];
- int ret;
-
- if (atomic_inc_return(&user_count) == 1) {
- ret = user_init();
- if (ret < 0) {
- ipath_dev_err(dd, "Unable to set up user support: "
- "error %d\n", -ret);
- goto bail;
- }
- ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
- &wildcard_dev);
- if (ret < 0) {
- ipath_dev_err(dd, "Could not create wildcard "
- "minor: error %d\n", -ret);
- goto bail_user;
- }
-
- atomic_set(&user_setup, 1);
- }
-
- snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
-
- ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
- &dd->user_cdev, &dd->user_dev);
- if (ret < 0)
- ipath_dev_err(dd, "Could not create user minor %d, %s\n",
- dd->ipath_unit + 1, name);
-
- goto bail;
-
-bail_user:
- user_cleanup();
-bail:
- return ret;
-}
-
-void ipath_user_remove(struct ipath_devdata *dd)
-{
- cleanup_cdev(&dd->user_cdev, &dd->user_dev);
-
- if (atomic_dec_return(&user_count) == 0) {
- if (atomic_read(&user_setup) == 0)
- goto bail;
-
- cleanup_cdev(&wildcard_cdev, &wildcard_dev);
- user_cleanup();
-
- atomic_set(&user_setup, 0);
- }
-bail:
- return;
-}
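
The eager-buffer sizing in the removed init_user_egr_sizes() above boils down to simple arithmetic: receive buffers are carved out of 32KB physically contiguous chunks, and the chunk count is the eager-buffer count rounded up to whole chunks. A minimal stand-alone sketch of that arithmetic, using only the 32KB constant visible in the removed code (the helper and variable names below are illustrative, not part of the driver):

#include <stdio.h>

/* 32KB chunk of physically contiguous memory, as in the removed driver. */
#define EGR_CHUNK_SIZE 0x8000u

/* Buffers per chunk, and chunks needed, rounded up (illustrative helper). */
static unsigned egr_chunks_needed(unsigned egrcnt, unsigned egrbufsize)
{
	unsigned perchunk = EGR_CHUNK_SIZE / egrbufsize;

	return (egrcnt + perchunk - 1) / perchunk;
}

int main(void)
{
	/* e.g. 512 eager buffers of 2048 bytes: 16 per chunk -> 32 chunks */
	printf("%u chunks\n", egr_chunks_needed(512, 2048));
	return 0;
}
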
diff --git a/drivers/staging/rdma/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
deleted file mode 100644
index 476fcdf05acb..000000000000
--- a/drivers/staging/rdma/ipath/ipath_fs.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-#define IPATHFS_MAGIC 0x726a77
-
-static struct super_block *ipath_super;
-
-static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct file_operations *fops,
- void *data)
-{
- int error;
- struct inode *inode = new_inode(dir->i_sb);
-
- if (!inode) {
- error = -EPERM;
- goto bail;
- }
-
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_private = data;
- if (S_ISDIR(mode)) {
- inode->i_op = &simple_dir_inode_operations;
- inc_nlink(inode);
- inc_nlink(dir);
- }
-
- inode->i_fop = fops;
-
- d_instantiate(dentry, inode);
- error = 0;
-
-bail:
- return error;
-}
-
-static int create_file(const char *name, umode_t mode,
- struct dentry *parent, struct dentry **dentry,
- const struct file_operations *fops, void *data)
-{
- int error;
-
- inode_lock(d_inode(parent));
- *dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(*dentry))
- error = ipathfs_mknod(d_inode(parent), *dentry,
- mode, fops, data);
- else
- error = PTR_ERR(*dentry);
- inode_unlock(d_inode(parent));
-
- return error;
-}
-
-static ssize_t atomic_stats_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
- sizeof ipath_stats);
-}
-
-static const struct file_operations atomic_stats_ops = {
- .read = atomic_stats_read,
- .llseek = default_llseek,
-};
-
-static ssize_t atomic_counters_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct infinipath_counters counters;
- struct ipath_devdata *dd;
-
- dd = file_inode(file)->i_private;
- dd->ipath_f_read_counters(dd, &counters);
-
- return simple_read_from_buffer(buf, count, ppos, &counters,
- sizeof counters);
-}
-
-static const struct file_operations atomic_counters_ops = {
- .read = atomic_counters_read,
- .llseek = default_llseek,
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ipath_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if ( pos < 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (pos >= sizeof(struct ipath_flash)) {
- ret = 0;
- goto bail;
- }
-
- if (count > sizeof(struct ipath_flash) - pos)
- count = sizeof(struct ipath_flash) - pos;
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
-
- dd = file_inode(file)->i_private;
- if (ipath_eeprom_read(dd, pos, tmp, count)) {
- ipath_dev_err(dd, "failed to read from flash\n");
- ret = -ENXIO;
- goto bail_tmp;
- }
-
- if (copy_to_user(buf, tmp, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
-
-bail:
- return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ipath_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos != 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (count != sizeof(struct ipath_flash)) {
- ret = -EINVAL;
- goto bail;
- }
-
- tmp = memdup_user(buf, count);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
-
- dd = file_inode(file)->i_private;
- if (ipath_eeprom_write(dd, pos, tmp, count)) {
- ret = -ENXIO;
- ipath_dev_err(dd, "failed to write to flash\n");
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
-
-bail:
- return ret;
-}
-
-static const struct file_operations flash_ops = {
- .read = flash_read,
- .write = flash_write,
- .llseek = default_llseek,
-};
-
-static int create_device_files(struct super_block *sb,
- struct ipath_devdata *dd)
-{
- struct dentry *dir, *tmp;
- char unit[10];
- int ret;
-
- snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
- ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
- &simple_dir_operations, dd);
- if (ret) {
- printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
- goto bail;
- }
-
- ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
- &atomic_counters_ops, dd);
- if (ret) {
- printk(KERN_ERR "create_file(%s/atomic_counters) "
- "failed: %d\n", unit, ret);
- goto bail;
- }
-
- ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
- &flash_ops, dd);
- if (ret) {
- printk(KERN_ERR "create_file(%s/flash) "
- "failed: %d\n", unit, ret);
- goto bail;
- }
-
-bail:
- return ret;
-}
-
-static int remove_file(struct dentry *parent, char *name)
-{
- struct dentry *tmp;
- int ret;
-
- tmp = lookup_one_len(name, parent, strlen(name));
-
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto bail;
- }
-
- spin_lock(&tmp->d_lock);
- if (simple_positive(tmp)) {
- dget_dlock(tmp);
- __d_drop(tmp);
- spin_unlock(&tmp->d_lock);
- simple_unlink(d_inode(parent), tmp);
- } else
- spin_unlock(&tmp->d_lock);
-
- ret = 0;
-bail:
- /*
- * We don't expect clients to care about the return value, but
- * it's there if they need it.
- */
- return ret;
-}
-
-static int remove_device_files(struct super_block *sb,
- struct ipath_devdata *dd)
-{
- struct dentry *dir, *root;
- char unit[10];
- int ret;
-
- root = dget(sb->s_root);
- inode_lock(d_inode(root));
- snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
- dir = lookup_one_len(unit, root, strlen(unit));
-
- if (IS_ERR(dir)) {
- ret = PTR_ERR(dir);
- printk(KERN_ERR "Lookup of %s failed\n", unit);
- goto bail;
- }
-
- remove_file(dir, "flash");
- remove_file(dir, "atomic_counters");
- d_delete(dir);
- ret = simple_rmdir(d_inode(root), dir);
-
-bail:
- inode_unlock(d_inode(root));
- dput(root);
- return ret;
-}
-
-static int ipathfs_fill_super(struct super_block *sb, void *data,
- int silent)
-{
- struct ipath_devdata *dd, *tmp;
- unsigned long flags;
- int ret;
-
- static struct tree_descr files[] = {
- [2] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
- {""},
- };
-
- ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
- if (ret) {
- printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
- goto bail;
- }
-
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- ret = create_device_files(sb, dd);
- if (ret)
- goto bail;
- spin_lock_irqsave(&ipath_devs_lock, flags);
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-bail:
- return ret;
-}
-
-static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- struct dentry *ret;
- ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
- if (!IS_ERR(ret))
- ipath_super = ret->d_sb;
- return ret;
-}
-
-static void ipathfs_kill_super(struct super_block *s)
-{
- kill_litter_super(s);
- ipath_super = NULL;
-}
-
-int ipathfs_add_device(struct ipath_devdata *dd)
-{
- int ret;
-
- if (ipath_super == NULL) {
- ret = 0;
- goto bail;
- }
-
- ret = create_device_files(ipath_super, dd);
-
-bail:
- return ret;
-}
-
-int ipathfs_remove_device(struct ipath_devdata *dd)
-{
- int ret;
-
- if (ipath_super == NULL) {
- ret = 0;
- goto bail;
- }
-
- ret = remove_device_files(ipath_super, dd);
-
-bail:
- return ret;
-}
-
-static struct file_system_type ipathfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "ipathfs",
- .mount = ipathfs_mount,
- .kill_sb = ipathfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
-
-int __init ipath_init_ipathfs(void)
-{
- return register_filesystem(&ipathfs_fs_type);
-}
-
-void __exit ipath_exit_ipathfs(void)
-{
- unregister_filesystem(&ipathfs_fs_type);
-}
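
The removed flash_read() above follows a common bounds-clamped read pattern: reject negative offsets, treat an offset at or past the end as EOF, and trim the requested length to whatever remains. A minimal stand-alone sketch of that pattern, assuming only what the removed code shows (the function name and the fixed image size below are illustrative, not part of the driver):

#include <stddef.h>
#include <string.h>

/*
 * Bounds-clamped read mirroring the checks in the removed flash_read():
 * negative offset -> error, offset at/after the end -> 0 (EOF), and the
 * length is trimmed so the copy never runs past the end of the image.
 */
static long clamped_read(char *dst, const char *src, size_t src_len,
			 long pos, size_t count)
{
	if (pos < 0)
		return -1;			/* the driver returns -EINVAL */
	if ((size_t)pos >= src_len)
		return 0;			/* nothing left to read */
	if (count > src_len - (size_t)pos)
		count = src_len - (size_t)pos;	/* trim to what remains */
	memcpy(dst, src + pos, count);
	return (long)count;
}

int main(void)
{
	const char image[] = "01234567";	/* stand-in 8-byte flash image */
	char buf[16];

	/* Ask for 16 bytes starting at offset 6: only 2 bytes remain. */
	return clamped_read(buf, image, 8, 6, sizeof(buf)) == 2 ? 0 : 1;
}
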
diff --git a/drivers/staging/rdma/ipath/ipath_iba6110.c b/drivers/staging/rdma/ipath/ipath_iba6110.c
deleted file mode 100644
index 5f13572a5e24..000000000000
--- a/drivers/staging/rdma/ipath/ipath_iba6110.c
+++ /dev/null
@@ -1,1939 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the InfiniPath
- * HT chip.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/htirq.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
-
-
-/*
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- *
- * The names are in InterCap form because they're taken straight from
- * the chip specification. Since they're only used in this file, they
- * don't pollute the rest of the source.
-*/
-
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long DebugPort;
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long ReservedMisc1;
- unsigned long long InterruptConfig;
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long ReservedRcv[10];
- unsigned long long SendCtrl;
- unsigned long long SendPIOBufBase;
- unsigned long long SendPIOSize;
- unsigned long long SendPIOBufCnt;
- unsigned long long SendPIOAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long ReservedSend[9];
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long SendBufferErrorCONT2;
- unsigned long long SendBufferErrorCONT3;
- unsigned long long ReservedSBE[4];
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddr1;
- unsigned long long RcvHdrAddr2;
- unsigned long long RcvHdrAddr3;
- unsigned long long RcvHdrAddr4;
- unsigned long long RcvHdrAddr5;
- unsigned long long RcvHdrAddr6;
- unsigned long long RcvHdrAddr7;
- unsigned long long RcvHdrAddr8;
- unsigned long long ReservedRHA[7];
- unsigned long long RcvHdrTailAddr0;
- unsigned long long RcvHdrTailAddr1;
- unsigned long long RcvHdrTailAddr2;
- unsigned long long RcvHdrTailAddr3;
- unsigned long long RcvHdrTailAddr4;
- unsigned long long RcvHdrTailAddr5;
- unsigned long long RcvHdrTailAddr6;
- unsigned long long RcvHdrTailAddr7;
- unsigned long long RcvHdrTailAddr8;
- unsigned long long ReservedRHTA[7];
- unsigned long long Sync; /* Software only */
- unsigned long long Dump; /* Software only */
- unsigned long long SimVer; /* Software only */
- unsigned long long ReservedSW[5];
- unsigned long long SerdesConfig0;
- unsigned long long SerdesConfig1;
- unsigned long long SerdesStatus;
- unsigned long long XGXSConfig;
- unsigned long long ReservedSW2[4];
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 Reserved1;
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 Reserved6;
- __u64 Reserved7;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
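As a rough standalone sketch of what these two macros do (struct and helper names here are invented for illustration, not the driver's own API): offsetof() gives the field's byte offset within the chip-layout struct, and dividing by sizeof(u64) turns that into the 64-bit word index the register accessors take.

#include <stddef.h>
#include <stdint.h>

struct chip_regs {                       /* hypothetical chip layout */
        uint64_t Revision;               /* word index 0 */
        uint64_t Control;                /* word index 1 */
        uint64_t PageAlign;              /* word index 2 */
};

/* byte offset divided by 8 gives the register's 64-bit word index */
#define REG_INDEX(field) \
        (offsetof(struct chip_regs, field) / sizeof(uint64_t))

/* a register read is then just an indexed 64-bit MMIO access */
static inline uint64_t read_reg(const volatile uint64_t *base, size_t idx)
{
        return base[idx];   /* e.g. read_reg(base, REG_INDEX(Control)) */
}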
-static const struct ipath_kregs ipath_ht_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugport = IPATH_KREG_OFFSET(DebugPort),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
- .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
- .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
- .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
- /*
- * These should not be used directly via ipath_write_kreg64();
- * use them with ipath_write_kreg64_port().
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
-};
-
-static const struct ipath_cregs ipath_ht_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- /* calc from Reg_CounterRegBase + offset */
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
-#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR 0x0000000000800000ULL
-#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR 0x0000000001000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR 0x0000000002000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR 0x0000000004000000ULL
-#define INFINIPATH_HWE_HTCMISCERR4 0x0000000008000000ULL
-#define INFINIPATH_HWE_HTCMISCERR5 0x0000000010000000ULL
-#define INFINIPATH_HWE_HTCMISCERR6 0x0000000020000000ULL
-#define INFINIPATH_HWE_HTCMISCERR7 0x0000000040000000ULL
-#define INFINIPATH_HWE_HTCBUSTREQPARITYERR 0x0000000080000000ULL
-#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
-#define INFINIPATH_HWE_HTCBUSIREQPARITYERR 0x0000000200000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_FBSLIP 0x0200000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_RFSLIP 0x0400000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_FBSLIP 0x0800000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL
-
-#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6110_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000
-
-
-/* TID entries (memory), HT-only */
-#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
-#define INFINIPATH_RT_VALID 0x8000000000000000ULL
-#define INFINIPATH_RT_ADDR_SHIFT 0
-#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
-#define INFINIPATH_RT_BUFSIZE_SHIFT 48
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x7ULL
-
-/*
- * masks and bits that are different in different chips, or present only
- * in one
- */
-static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
- INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
-static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
- INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
-
-static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
- INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
- INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
- INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
- INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA \
- (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL \
- (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-/* keep the code below somewhat more readable; not used elsewhere */
-#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
- infinipath_hwe_htclnkabyte1crcerr)
-#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr | \
- infinipath_hwe_htclnkbbyte1crcerr)
-#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
- infinipath_hwe_htclnkbbyte0crcerr)
-#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr | \
- infinipath_hwe_htclnkbbyte1crcerr)
-
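Loosely sketched, the four underlying CRC error bits (link A/B times byte lane 0/1) are regrouped two ways, per link and per lane; hwerr_crcbits() below uses those groupings to name the failing link and lane. A minimal self-contained decoder along the same lines (constants copied from the definitions above, helper name invented):

#include <stdint.h>

#define LNK_A_LANE0 0x0000000000800000ULL   /* HTCLNKABYTE0CRCERR */
#define LNK_A_LANE1 0x0000000001000000ULL   /* HTCLNKABYTE1CRCERR */
#define LNK_B_LANE0 0x0000000002000000ULL   /* HTCLNKBBYTE0CRCERR */
#define LNK_B_LANE1 0x0000000004000000ULL   /* HTCLNKBBYTE1CRCERR */

/* bit 0 of the result: link 0 (A) saw a CRC error; bit 1: link 1 (B) */
static int crc_err_links(uint64_t hwerrs)
{
        int links = 0;

        if (hwerrs & (LNK_A_LANE0 | LNK_A_LANE1))
                links |= 1;
        if (hwerrs & (LNK_B_LANE0 | LNK_B_LANE1))
                links |= 2;
        return links;
}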
-static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
- char *msg, size_t msgl)
-{
- char bitsmsg[64];
- ipath_err_t crcbits = hwerrs &
- (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
- /* don't check if 8bit HT */
- if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
- crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
- /* don't check if 8bit HT */
- if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
- crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
- /*
- * we'll want to ignore link errors on a link that is
- * not in use, if any. For now, complain about both.
- */
- if (crcbits) {
- u16 ctrl0, ctrl1;
- snprintf(bitsmsg, sizeof bitsmsg,
- "[HT%s lane %s CRC (%llx); powercycle to completely clear]",
- !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
- "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
- ? "1 (B)" : "0+1 (A+B)"),
- !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
- : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
- "0+1"), (unsigned long long) crcbits);
- strlcat(msg, bitsmsg, msgl);
-
- /*
- * print extra info for debugging. slave/primary
- * config word 4, 8 (link control 0, 1)
- */
-
- if (pci_read_config_word(dd->pcidev,
- dd->ipath_ht_slave_off + 0x4,
- &ctrl0))
- dev_info(&dd->pcidev->dev, "Couldn't read "
- "linkctrl0 of slave/primary "
- "config block\n");
- else if (!(ctrl0 & 1 << 6))
- /* not if EOC bit set */
- ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
- ((ctrl0 >> 8) & 7) ? " CRC" : "",
- ((ctrl0 >> 4) & 1) ? "linkfail" :
- "");
- if (pci_read_config_word(dd->pcidev,
- dd->ipath_ht_slave_off + 0x8,
- &ctrl1))
- dev_info(&dd->pcidev->dev, "Couldn't read "
- "linkctrl1 of slave/primary "
- "config block\n");
- else if (!(ctrl1 & 1 << 6))
- /* not if EOC bit set */
- ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
- ((ctrl1 >> 8) & 7) ? " CRC" : "",
- ((ctrl1 >> 4) & 1) ? "linkfail" :
- "");
-
- /* disable until driver reloaded */
- dd->ipath_hwerrmask &= ~crcbits;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- ipath_dbg("HT crc errs: %s\n", msg);
- } else
- ipath_dbg("ignoring HT crc errors 0x%llx, "
- "not in use\n", (unsigned long long)
- (hwerrs & (_IPATH_HTLINK0_CRCBITS |
- _IPATH_HTLINK1_CRCBITS)));
-}
-
-/* 6110 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"),
- INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"),
- INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"),
- INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"),
- INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"),
- INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
- << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_ht_txe_recover(struct ipath_devdata *dd)
-{
- ++ipath_stats.sps_txeparity;
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
-}
-
-
-/**
- * ipath_ht_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now
- * we'll print them and continue. We reuse the same message buffer
- * as ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-
- if (!hwerrs) {
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- /*
- * better than printing confusing messages.
- * This seems to be related to clearing the crc error, or
- * the pll error during init.
- */
- goto bail;
- } else if (hwerrs == -1LL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- goto bail;
- }
- ipath_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
-
- /*
- * make sure we get this much out, unless told to be quiet,
- * it's a parity error we may recover from,
- * or it's occurred within the last 5 seconds
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
- RXE_EAGER_PARITY)) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
- * parity errors in send memory are recoverable,
- * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- ipath_ht_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
-
- if (!hwerrs) {
- ipath_dbg("Clearing freezemode on ignored or "
- "recovered hardware error\n");
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- /*
- * we may someday want to decode which bits belong to which
- * functional area for parity errors, etc.
- */
- if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
- << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
- bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_6110_hwerror_msgs,
- ARRAY_SIZE(ipath_6110_hwerror_msgs),
- msg, msgl);
-
- if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
- hwerr_crcbits(dd, hwerrs, msg, msgl);
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP | \
- INFINIPATH_HWE_HTBPLL_FBSLIP | \
- INFINIPATH_HWE_HTBPLL_RFSLIP | \
- INFINIPATH_HWE_HTAPLL_FBSLIP | \
- INFINIPATH_HWE_HTAPLL_RFSLIP)
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused.
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs) {
- /*
- * If any bits are set that we aren't ignoring, only
- * make the complaint once, in case it's stuck
- * or recurring and we get here multiple
- * times.
- * Force the link down so the switch knows, and the
- * LEDs are turned off.
- */
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_ht_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error (freeze "
- "mode), no longer usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- /* mark as having had error */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- /*
- * mark as not usable, at a minimum until driver
- * is reloaded, probably until reboot, since no
- * other reset is possible.
- */
- dd->ipath_flags &= ~IPATH_INITTED;
- } else {
- *msg = 0; /* recovered from all of them */
- }
- if (*msg)
- ipath_dev_err(dd, "%s hardware error\n", msg);
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
- /*
- * for status file; if no trailing brace is copied,
- * we'll know it was truncated.
- */
- snprintf(dd->ipath_freezemsg,
- dd->ipath_freezelen, "{%s}", msg);
-
-bail:;
-}
-
-/**
- * ipath_ht_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * fill in the board name, based on the board revision register
- */
-static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret = 0;
-
- switch (boardrev) {
- case 5:
- /*
- * original production board; two production levels, with
- * different serial number ranges. See ipath_ht_early_init() for
- * case where we enable IPATH_GPIO_INTR for later serial # range.
- * Original 112* serial number is no longer supported.
- */
- n = "InfiniPath_QHT7040";
- break;
- case 7:
- /* small form factor production board */
- n = "InfiniPath_QHT7140";
- break;
- default: /* don't know, just print the number */
- ipath_dev_err(dd, "Don't yet know about board "
- "with ID %u\n", boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (ret) {
- ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
- goto bail;
- }
- if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
- dd->ipath_minrev > 4)) {
- /*
- * This version of the driver only supports Rev 3.2 - 3.4
- */
- ipath_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- goto bail;
- }
- /*
- * pkt/word counters are 32 bit, and therefore wrap fast enough
- * that we snapshot them from a timer, and maintain 64 bit shadow
- * copies
- */
- dd->ipath_flags |= IPATH_32BITCOUNTERS;
- dd->ipath_flags |= IPATH_GPIO_INTR;
- if (dd->ipath_lbus_speed != 800)
- ipath_dev_err(dd,
- "Incorrectly configured for HT @ %uMHz\n",
- dd->ipath_lbus_speed);
-
- /*
- * set here, not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
-bail:
- return ret;
-}
-
-static void ipath_check_htlink(struct ipath_devdata *dd)
-{
- u8 linkerr, link_off, i;
-
- for (i = 0; i < 2; i++) {
- link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
- if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
- dev_info(&dd->pcidev->dev, "Couldn't read "
- "linkerror%d of HT slave/primary block\n",
- i);
- else if (linkerr & 0xf0) {
- ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
- "clearing\n", linkerr >> 4, i);
- /*
- * writing the linkerr bits that are set should
- * clear them
- */
- if (pci_write_config_byte(dd->pcidev, link_off,
- linkerr))
- ipath_dbg("Failed write to clear HT "
- "linkerror%d\n", i);
- if (pci_read_config_byte(dd->pcidev, link_off,
- &linkerr))
- dev_info(&dd->pcidev->dev,
- "Couldn't reread linkerror%d of "
- "HT slave/primary block\n", i);
- else if (linkerr & 0xf0)
- dev_info(&dd->pcidev->dev,
- "HT linkerror%d bits 0x%x "
- "couldn't be cleared\n",
- i, linkerr >> 4);
- }
- }
-}
-
-static int ipath_setup_ht_reset(struct ipath_devdata *dd)
-{
- ipath_dbg("No reset possible for this InfiniPath hardware\n");
- return 0;
-}
-
-#define HT_INTR_DISC_CONFIG 0x80 /* HT interrupt and discovery cap */
-#define HT_INTR_REG_INDEX 2 /* intconfig requires indirect accesses */
-
-/*
- * Bits 13-15 of command==0 indicate the slave/primary block. Clear any HT CRC
- * errors. We only bother to do this at load time, because it's OK if
- * it happened before we were loaded (first time after boot/reset),
- * but any time after that, it's fatal anyway. We also must not check
- * for upper-byte errors if we are in 8 bit mode, so figure out
- * our width. For now, at least, also complain if it's 8 bit.
- */
-static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
- int pos, u8 cap_type)
-{
- u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
- u16 linkctrl = 0;
- int i;
-
- dd->ipath_ht_slave_off = pos;
- /* command word, master_host bit */
- /* master host || slave */
- if ((cap_type >> 2) & 1)
- link_a_b_off = 4;
- else
- link_a_b_off = 0;
- ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
- link_a_b_off ? 1 : 0,
- link_a_b_off ? 'B' : 'A');
-
- link_a_b_off += pos;
-
- /*
- * check both link control registers; clear both HT CRC sets if
- * necessary.
- */
- for (i = 0; i < 2; i++) {
- link_off = pos + i * 4 + 0x4;
- if (pci_read_config_word(pdev, link_off, &linkctrl))
- ipath_dev_err(dd, "Couldn't read HT link control%d "
- "register\n", i);
- else if (linkctrl & (0xf << 8)) {
- ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
- "bits %x\n", i, linkctrl & (0xf << 8));
- /*
- * now write them back to clear the error.
- */
- pci_write_config_word(pdev, link_off,
- linkctrl & (0xf << 8));
- }
- }
-
- /*
- * As with HT CRC bits, same for protocol errors that might occur
- * during boot.
- */
- for (i = 0; i < 2; i++) {
- link_off = pos + i * 4 + 0xd;
- if (pci_read_config_byte(pdev, link_off, &linkerr))
- dev_info(&pdev->dev, "Couldn't read linkerror%d "
- "of HT slave/primary block\n", i);
- else if (linkerr & 0xf0) {
- ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
- "clearing\n", linkerr >> 4, i);
- /*
- * writing the linkerr bits that are set will clear
- * them
- */
- if (pci_write_config_byte
- (pdev, link_off, linkerr))
- ipath_dbg("Failed write to clear HT "
- "linkerror%d\n", i);
- if (pci_read_config_byte(pdev, link_off, &linkerr))
- dev_info(&pdev->dev, "Couldn't reread "
- "linkerror%d of HT slave/primary "
- "block\n", i);
- else if (linkerr & 0xf0)
- dev_info(&pdev->dev, "HT linkerror%d bits "
- "0x%x couldn't be cleared\n",
- i, linkerr >> 4);
- }
- }
-
- /*
- * this is just for our link to the host, not devices connected
- * through tunnel.
- */
-
- if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
- ipath_dev_err(dd, "Couldn't read HT link width "
- "config register\n");
- else {
- u32 width;
- switch (linkwidth & 7) {
- case 5:
- width = 4;
- break;
- case 4:
- width = 2;
- break;
- case 3:
- width = 32;
- break;
- case 1:
- width = 16;
- break;
- case 0:
- default: /* if wrong, assume 8 bit */
- width = 8;
- break;
- }
-
- dd->ipath_lbus_width = width;
-
- if (linkwidth != 0x11) {
- ipath_dev_err(dd, "Not configured for 16 bit HT "
- "(%x)\n", linkwidth);
- if (!(linkwidth & 0xf)) {
- ipath_dbg("Will ignore HT lane1 errors\n");
- dd->ipath_flags |= IPATH_8BIT_IN_HT0;
- }
- }
- }
-
- /*
- * this is just for our link to the host, not devices connected
- * through tunnel.
- */
- if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
- ipath_dev_err(dd, "Couldn't read HT link frequency "
- "config register\n");
- else {
- u32 speed;
- switch (linkwidth & 0xf) {
- case 6:
- speed = 1000;
- break;
- case 5:
- speed = 800;
- break;
- case 4:
- speed = 600;
- break;
- case 3:
- speed = 500;
- break;
- case 2:
- speed = 400;
- break;
- case 1:
- speed = 300;
- break;
- default:
- /*
- * assume reserved and vendor-specific are 200...
- */
- case 0:
- speed = 200;
- break;
- }
- dd->ipath_lbus_speed = speed;
- }
-
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "HyperTransport,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
-}
-
-static int ipath_ht_intconfig(struct ipath_devdata *dd)
-{
- int ret;
-
- if (dd->ipath_intconfig) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
- dd->ipath_intconfig); /* interrupt address */
- ret = 0;
- } else {
- ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
- "interrupt address\n");
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg)
-{
- struct ipath_devdata *dd = pci_get_drvdata(dev);
- u64 prev_intconfig = dd->ipath_intconfig;
-
- dd->ipath_intconfig = msg->address_lo;
- dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
-
- /*
- * If the previous value of dd->ipath_intconfig is zero, we're
- * getting configured for the first time, and must not program the
- * intconfig register here (it will be programmed later, when the
- * hardware is ready). Otherwise, we should.
- */
- if (prev_intconfig)
- ipath_ht_intconfig(dd);
-}
-
-/**
- * ipath_setup_ht_config - setup the interruptconfig register
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * setup the interruptconfig register from the HT config info.
- * Also clear CRC errors in HT linkcontrol, if necessary.
- * This is done only for the real hardware. It is done before
- * chip address space is initted, so can't touch infinipath registers
- */
-static int ipath_setup_ht_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret;
-
- ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
- if (ret < 0) {
- ipath_dev_err(dd, "Couldn't create interrupt handler: "
- "err %d\n", ret);
- goto bail;
- }
- dd->ipath_irq = ret;
- ret = 0;
-
- /*
- * Handle clearing CRC errors in linkctrl register if necessary. We
- * do this early, before we ever enable errors or hardware errors,
- * mostly to avoid causing the chip to enter freeze mode.
- */
- pos = pci_find_capability(pdev, PCI_CAP_ID_HT);
- if (!pos) {
- ipath_dev_err(dd, "Couldn't find HyperTransport "
- "capability; no interrupts\n");
- ret = -ENODEV;
- goto bail;
- }
- do {
- u8 cap_type;
-
- /*
- * The HT capability type byte is 3 bytes after the
- * capability byte.
- */
- if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
- dev_info(&pdev->dev, "Couldn't read config "
- "command @ %d\n", pos);
- continue;
- }
- if (!(cap_type & 0xE0))
- slave_or_pri_blk(dd, pdev, pos, cap_type);
- } while ((pos = pci_find_next_capability(pdev, pos,
- PCI_CAP_ID_HT)));
-
- dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * Called during driver unload.
- * This is currently a nop for the HT chip, though not for all chips.
- */
-static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
-{
-}
-
-/**
- * ipath_setup_ht_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * Set the state of the two external LEDs, to indicate physical and
- * logical state of IB link. For this chip (at least with recommended
- * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
- * (logical state)
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
- u64 lst, u64 ltst)
-{
- u64 extctl;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- /*
- * start by setting both LED control bits to off, then turn
- * on the appropriate bit(s).
- */
- if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
- /*
- * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
- * is inverted, because it is normally used to indicate
- * a hardware fault at reset, if there were errors
- */
- extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
- | INFINIPATH_EXTC_LEDGBLERR_OFF;
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
- extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
- } else {
- extctl = dd->ipath_extctrl &
- ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- }
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-static void ipath_init_ht_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_ht_kregs;
- dd->ipath_cregs = &ipath_ht_cregs;
-
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in newer chips.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in newer chips.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
-
- dd->ipath_i_bitsextant =
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
- INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
- INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
- INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
- INFINIPATH_E_HARDWARE;
-
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
- INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
- INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
- INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
- INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
- INFINIPATH_HWE_HTCMISCERR4 |
- INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
- INFINIPATH_HWE_HTCMISCERR7 |
- INFINIPATH_HWE_HTCBUSTREQPARITYERR |
- INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
- INFINIPATH_HWE_HTCBUSIREQPARITYERR |
- INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_HTBPLL_FBSLIP |
- INFINIPATH_HWE_HTBPLL_RFSLIP |
- INFINIPATH_HWE_HTAPLL_FBSLIP |
- INFINIPATH_HWE_HTAPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
- dd->delay_mult = 2; /* SDR, 4X, can't change */
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR;
- dd->ipath_link_width_enabled = IB_WIDTH_4X;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /* these can't change for this chip, so set once */
- dd->ipath_link_width_active = dd->ipath_link_width_enabled;
- dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
-}
-
-/**
- * ipath_ht_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
- ipath_dbg("MemBIST corrected\n");
-
- ipath_check_htlink(dd);
-
- /* barring bugs, all hwerrors become interrupts */
- val = -1LL;
- /* don't look at crc lane1 if 8 bit */
- if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
- val &= ~infinipath_hwe_htclnkabyte1crcerr;
- /* don't look at crc lane1 if 8 bit */
- if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
- val &= ~infinipath_hwe_htclnkbbyte1crcerr;
-
- /*
- * disable RXDSYNCMEMPARITY because external serdes is unused,
- * and therefore the logic will never be used or initialized,
- * and uninitialized state will normally result in this error
- * being asserted. Similarly for the external serdes pll
- * lock signal.
- */
- val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
-
- /*
- * Disable MISCERR4 because of an inversion in the HT core
- * logic checking for errors that cause this bit to be set.
- * The errata can also cause the protocol error bit to be set
- * in the HT config space linkerror register(s).
- */
- val &= ~INFINIPATH_HWE_HTCMISCERR4;
-
- /*
- * PLL ignored because unused MDIO interface has a logic problem
- */
- if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- dd->ipath_hwerrmask = val;
-}
-
-
-
-
-/**
- * ipath_ht_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
-{
- u64 val, config1;
- int ret = 0, change = 0;
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED)
- {
- ipath_dbg("At start, serdes PLL failed bit set in "
- "hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
- config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
- ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
- "config1=%llx, sstatus=%llx xgxs %llx\n",
- (unsigned long long) val, (unsigned long long) config1,
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- /* force reset on */
- val |= INFINIPATH_SERDC0_RESET_PLL
- /* | INFINIPATH_SERDC0_RESET_MASK */
- ;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- udelay(15); /* need pll reset set at least for a bit */
-
- if (val & INFINIPATH_SERDC0_RESET_PLL) {
- u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
- /* set lane resets, and tx idle, during pll reset */
- val2 |= INFINIPATH_SERDC0_RESET_MASK |
- INFINIPATH_SERDC0_TXIDLE;
- ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
- "%llx)\n", (unsigned long long) val2);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
- val2);
- /*
- * be sure chip saw it
- */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- /*
- * need pll reset clear at least 11 usec before lane
- * resets cleared; give it a few more
- */
- udelay(15);
- val = val2; /* for check below */
- }
-
- if (val & (INFINIPATH_SERDC0_RESET_PLL |
- INFINIPATH_SERDC0_RESET_MASK |
- INFINIPATH_SERDC0_TXIDLE)) {
- val &= ~(INFINIPATH_SERDC0_RESET_PLL |
- INFINIPATH_SERDC0_RESET_MASK |
- INFINIPATH_SERDC0_TXIDLE);
- /* clear them */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
- val);
- }
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- if (val & INFINIPATH_XGXS_RESET) {
- /* normally true after boot */
- val &= ~INFINIPATH_XGXS_RESET;
- change = 1;
- }
- if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
- INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
- /* need to compensate for Tx inversion in partner */
- val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
- INFINIPATH_XGXS_RX_POL_SHIFT);
- val |= dd->ipath_rx_pol_inv <<
- INFINIPATH_XGXS_RX_POL_SHIFT;
- change = 1;
- }
- if (change)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
- ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
- "config1=%llx, sstatus=%llx xgxs %llx\n",
- (unsigned long long) val, (unsigned long long) config1,
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- return ret; /* for now, say we always succeeded */
-}
-
-/**
- * ipath_ht_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- *
- * Called when the driver is being unloaded.
- */
-static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- val |= INFINIPATH_SERDC0_TXIDLE;
- ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
- (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-/**
- * ipath_ht_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of the in-memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used both for the full cleanup on exit and for the normal
- * setup and teardown.
- */
-static void ipath_ht_put_tid(struct ipath_devdata *dd,
- u64 __iomem *tidptr, u32 type,
- unsigned long pa)
-{
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
- dev_info(&dd->pcidev->dev,
- "physaddr %lx has more than "
- "40 bits, using only 40!!!\n", pa);
- pa &= INFINIPATH_RT_ADDR_MASK;
- }
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else {
- /* in words (fixed, full page). */
- u64 lenvalid = PAGE_SIZE >> 2;
- lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
- pa |= lenvalid | INFINIPATH_RT_VALID;
- }
- }
-
- writeq(pa, tidptr);
-}
-
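Putting the INFINIPATH_RT_* fields together: each 64-bit TID entry carries a valid bit (bit 63), a buffer length in 32-bit words starting at bit 48, and a 40-bit physical address in the low bits, which is what ipath_ht_put_tid() above writes. A hedged standalone sketch of composing such a word (the function and parameter names below are invented for illustration, not part of this driver):

#include <stdint.h>

#define RT_VALID         0x8000000000000000ULL  /* bit 63: entry is valid */
#define RT_ADDR_MASK     0xFFFFFFFFFFULL        /* low 40 bits: physical address */
#define RT_BUFSIZE_SHIFT 48                     /* length field, in 32-bit words */

/* compose an "expected" TID entry for a buffer of buf_bytes at physical pa */
static uint64_t make_expected_tid(uint64_t pa, uint64_t buf_bytes)
{
        uint64_t tid = pa & RT_ADDR_MASK;

        tid |= (buf_bytes >> 2) << RT_BUFSIZE_SHIFT;  /* bytes -> 32-bit words */
        tid |= RT_VALID;
        return tid;
}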
-
-/**
- * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * Used from ipath_close(), and at chip initialization.
- */
-static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- /*
- * need to invalidate all of the expected TID entries for this
- * port, so we don't have valid entries that might somehow get
- * used (early in next use of this port, or through some bug)
- */
- tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt *
- sizeof(*tidbase));
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- dd->ipath_tidinvalid);
-
- tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase +
- port * dd->ipath_rcvegrcnt *
- sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvegrcnt; i++)
- ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- dd->ipath_tidinvalid);
-}
-
-/**
- * ipath_ht_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time.
- */
-static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
-{
- dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
- dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
- dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
-
- /*
- * work around chip errata bug 7358, by marking invalid tids
- * as having max length
- */
- dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
- INFINIPATH_RT_BUFSIZE_SHIFT;
-}
-
-static int ipath_ht_early_init(struct ipath_devdata *dd)
-{
- u32 __iomem *piobuf;
- u32 pioincr, val32;
- int i;
-
- /*
- * one cache line; long IB headers will spill over into received
- * buffer
- */
- dd->ipath_rcvhdrentsize = 16;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-
- /*
- * For HT, we allocate a somewhat overly large eager buffer,
- * such that we can guarantee that we can receive the largest
- * packet that we can send out. To truly support a 4KB MTU,
- * we need to bump this to a large value. To date, other than
- * testing, we have never encountered an HCA that can really
- * send 4KB MTU packets, so we do not handle that (we'll get
- * error interrupts if we ever see one).
- */
- dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
-
- /*
- * the min() check here is currently a nop, but it may not
- * always be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize);
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
- ipath_ht_tidtemplate(dd);
-
- /*
- * zero all the TID entries at startup. We do this for sanity,
- * in case of a previous driver crash of some kind, and also
- * because the chip powers up with these memories in an unknown
- * state. Use portcnt, not cfgports, since this is for the
- * full chip, not for current (possibly different) configuration
- * value.
- * Chip Errata bug 6447
- */
- for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
- ipath_ht_clear_tids(dd, val32);
-
- /*
- * write the pbc of each buffer, to be sure it's initialized, then
- * cancel all the buffers, and also abort any packets that might
- * have been in flight for some reason (the latter is for driver
- * unload/reload, but isn't a bad idea at first init). PIO send
- * isn't enabled at this point, so there is no danger of sending
- * these out on the wire.
- * Chip Errata bug 6610
- */
- piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
- dd->ipath_piobufbase);
- pioincr = dd->ipath_palign / sizeof(*piobuf);
- for (i = 0; i < dd->ipath_piobcnt2k; i++) {
- /*
- * reasonable word count, just to init pbc
- */
- writel(16, piobuf);
- piobuf += pioincr;
- }
-
- ipath_get_eeprom_info(dd);
- if (dd->ipath_boardrev == 5) {
- /*
- * Later production QHT7040 has same changes as QHT7140, so
- * can use GPIO interrupts. They have serial #'s starting
- * with 128, rather than 112.
- */
- if (dd->ipath_serial[0] == '1' &&
- dd->ipath_serial[1] == '2' &&
- dd->ipath_serial[2] == '8')
- dd->ipath_flags |= IPATH_GPIO_INTR;
- else {
- ipath_dev_err(dd, "Unsupported InfiniPath board "
- "(serial number %.16s)!\n",
- dd->ipath_serial);
- return 1;
- }
- }
-
- if (dd->ipath_minrev >= 4) {
- /* Rev4+ reports extra errors via internal GPIO pins */
- dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
- dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- }
-
- return 0;
-}
-
-
-/**
- * ipath_ht_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port data
- * @kbase: ipath_base_info pointer
- *
- * We set the HT runtime flag because the bandwidth difference between
- * HyperTransport and PCIe can affect some user packet algorithms.
- */
-static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
-
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
- IPATH_RUNTIME_PIO_REGSWAPPED;
-
- if (pd->port_dd->ipath_minrev < 4)
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_RCVHDR_COPY;
-
- return 0;
-}
-
-static void ipath_ht_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- ht_destroy_irq(dd->ipath_irq);
- dd->ipath_irq = 0;
- dd->ipath_intconfig = 0;
-}
-
-static struct ipath_message_header *
-ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- return (struct ipath_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- dd->ipath_portcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- dd->ipath_p0_rcvegrcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_ht_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- cntrs->LBIntCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
- cntrs->LBFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
- cntrs->TxSDmaDescCnt = 0;
- cntrs->TxUnsupVLErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
- cntrs->TxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
- cntrs->TxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
- cntrs->TxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
- cntrs->TxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
- cntrs->TxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
- cntrs->TxUnderrunCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
- cntrs->TxFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
- cntrs->TxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
- cntrs->RxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
- cntrs->RxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
- cntrs->RxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
- cntrs->RxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
- cntrs->RxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
- cntrs->RxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
- cntrs->RxICRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
- cntrs->RxVCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
- cntrs->RxFlowCtrlErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
- cntrs->RxBadFormatCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
- cntrs->RxLinkProblemCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
- cntrs->RxEBPCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
- cntrs->RxLPCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
- cntrs->RxBufOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
- cntrs->RxTIDFullErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
- cntrs->RxTIDValidErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
- cntrs->RxPKeyMismatchCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
- cntrs->RxP0HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
- cntrs->RxP1HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
- cntrs->RxP2HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
- cntrs->RxP3HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
- cntrs->RxP4HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
- cntrs->RxP5HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
- cntrs->RxP6HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
- cntrs->RxP7HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
- cntrs->RxP8HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
- cntrs->RxP9HdrEgrOvflCnt = 0;
- cntrs->RxP10HdrEgrOvflCnt = 0;
- cntrs->RxP11HdrEgrOvflCnt = 0;
- cntrs->RxP12HdrEgrOvflCnt = 0;
- cntrs->RxP13HdrEgrOvflCnt = 0;
- cntrs->RxP14HdrEgrOvflCnt = 0;
- cntrs->RxP15HdrEgrOvflCnt = 0;
- cntrs->RxP16HdrEgrOvflCnt = 0;
- cntrs->IBStatusChangeCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
- cntrs->IBLinkErrRecoveryCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
- cntrs->IBLinkDownedCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
- cntrs->IBSymbolErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
- cntrs->RxVL15DroppedPktCnt = 0;
- cntrs->RxOtherLocalPhyErrCnt = 0;
- cntrs->PcieRetryBufDiagQwordCnt = 0;
- cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
- cntrs->LocalLinkIntegrityErrCnt =
- (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
- dd->ipath_lli_errs : dd->ipath_lli_errors;
- cntrs->RxVlErrCnt = 0;
- cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
-{
- return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int ret;
-
- switch (which) {
- case IPATH_IB_CFG_LWID:
- ret = dd->ipath_link_width_active;
- break;
- case IPATH_IB_CFG_SPD:
- ret = dd->ipath_link_speed_active;
- break;
- case IPATH_IB_CFG_LWID_ENB:
- ret = dd->ipath_link_width_enabled;
- break;
- case IPATH_IB_CFG_SPD_ENB:
- ret = dd->ipath_link_speed_enabled;
- break;
- default:
- ret = -ENOTSUPP;
- break;
- }
- return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int ret = 0;
-
- if (which == IPATH_IB_CFG_LWID_ENB)
- dd->ipath_link_width_enabled = val;
- else if (which == IPATH_IB_CFG_SPD_ENB)
- dd->ipath_link_speed_enabled = val;
- else
- ret = -ENOTSUPP;
- return ret;
-}
-
-
-static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ipath_ib_linktrstate(dd, ibcs));
- return 0;
-}
-
-
-/**
- * ipath_init_iba6110_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_ht_intconfig;
- dd->ipath_f_bus = ipath_setup_ht_config;
- dd->ipath_f_reset = ipath_setup_ht_reset;
- dd->ipath_f_get_boardname = ipath_ht_boardname;
- dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
- dd->ipath_f_early_init = ipath_ht_early_init;
- dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_ht_clear_tids;
- dd->ipath_f_put_tid = ipath_ht_put_tid;
- dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
- dd->ipath_f_setextled = ipath_setup_ht_setextled;
- dd->ipath_f_get_base_info = ipath_ht_get_base_info;
- dd->ipath_f_free_irq = ipath_ht_free_irq;
- dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
- dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
- dd->ipath_f_config_ports = ipath_ht_config_ports;
- dd->ipath_f_read_counters = ipath_ht_read_counters;
- dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
- dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_ht_config_jint;
- dd->ipath_f_ib_updown = ipath_ht_ib_updown;
-
- /*
- * initialize chip-specific variables
- */
- ipath_init_ht_variables(dd);
-}
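
The file above is essentially a per-chip dispatch table: the common ipath code calls through the dd->ipath_f_* pointers, and each chip-specific file fills them in once at init time. A minimal sketch of the same pattern, with hypothetical names rather than the driver's own, could look like this:

struct chip_dev;

struct chip_ops {
	int (*get_ib_cfg)(struct chip_dev *dd, int which);
	int (*bringup_serdes)(struct chip_dev *dd);
};

struct chip_dev {
	const struct chip_ops *ops;	/* set once by the chip-specific init */
	int link_width_active;
};

static int my_chip_get_ib_cfg(struct chip_dev *dd, int which)
{
	(void)which;			/* only one config value in this sketch */
	return dd->link_width_active;
}

static int my_chip_bringup_serdes(struct chip_dev *dd)
{
	(void)dd;
	return 0;			/* nothing to train in this sketch */
}

static const struct chip_ops my_chip_ops = {
	.get_ib_cfg	= my_chip_get_ib_cfg,
	.bringup_serdes	= my_chip_bringup_serdes,
};

/* Analogous to ipath_init_iba6110_funcs(): publish this chip's methods. */
void my_chip_init_funcs(struct chip_dev *dd)
{
	dd->ops = &my_chip_ops;
}

The deleted file assigns each pointer field directly on the devdata rather than publishing a single const ops struct, but the dispatch idea is the same.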
diff --git a/drivers/staging/rdma/ipath/ipath_init_chip.c b/drivers/staging/rdma/ipath/ipath_init_chip.c
deleted file mode 100644
index a5eea199f733..000000000000
--- a/drivers/staging/rdma/ipath/ipath_init_chip.c
+++ /dev/null
@@ -1,1062 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-/*
- * min buffers we want to have per port, after driver
- */
-#define IPATH_MIN_USER_PORT_BUFCNT 7
-
-/*
- * Number of ports we are configured to use (to allow for more pio
- * buffers per port, etc.) Zero means use chip value.
- */
-static ushort ipath_cfgports;
-
-module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
-
-/*
- * Number of buffers reserved for driver (verbs and layered drivers.)
- * Initialized based on number of PIO buffers if not set via module interface.
- * The problem with this is that it's global, but we'll use different
- * numbers for different chip types.
- */
-static ushort ipath_kpiobufs;
-
-static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
-
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
- &ipath_kpiobufs, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
-
-/**
- * create_port0_egr - allocate the eager TID buffers
- * @dd: the infinipath device
- *
- * This code is now quite different for user and kernel, because
- * the kernel uses skb's, for the accelerated network performance.
- * This is the kernel (port0) version.
- *
- * Allocate the eager TID buffers and program them into infinipath.
- * We use the network layer alloc_skb() allocator to allocate the
- * memory, and either use the buffers as is for things like verbs
- * packets, or pass the buffers up to the ipath layered driver and
- * thence the network layer, replacing them as we do so (see
- * ipath_rcv_layer()).
- */
-static int create_port0_egr(struct ipath_devdata *dd)
-{
- unsigned e, egrcnt;
- struct ipath_skbinfo *skbinfo;
- int ret;
-
- egrcnt = dd->ipath_p0_rcvegrcnt;
-
- skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
- if (skbinfo == NULL) {
- ipath_dev_err(dd, "allocation error for eager TID "
- "skb array\n");
- ret = -ENOMEM;
- goto bail;
- }
- for (e = 0; e < egrcnt; e++) {
- /*
- * This is a bit tricky in that we allocate extra
- * space for 2 bytes of the 14 byte ethernet header.
- * These two bytes are passed in the ipath header so
- * the rest of the data is word aligned. We allocate
- * 4 bytes so that the data buffer stays word aligned.
- * See ipath_kreceive() for more details.
- */
- skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL);
- if (!skbinfo[e].skb) {
- ipath_dev_err(dd, "SKB allocation error for "
- "eager TID %u\n", e);
- while (e != 0)
- dev_kfree_skb(skbinfo[--e].skb);
- vfree(skbinfo);
- ret = -ENOMEM;
- goto bail;
- }
- }
- /*
- * After loop above, so we can test non-NULL to see if ready
- * to use at receive, etc.
- */
- dd->ipath_port0_skbinfo = skbinfo;
-
- for (e = 0; e < egrcnt; e++) {
- dd->ipath_port0_skbinfo[e].phys =
- ipath_map_single(dd->pcidev,
- dd->ipath_port0_skbinfo[e].skb->data,
- dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
- dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
- ((char __iomem *) dd->ipath_kregbase +
- dd->ipath_rcvegrbase),
- RCVHQ_RCV_TYPE_EAGER,
- dd->ipath_port0_skbinfo[e].phys);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
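
create_port0_egr() above follows the usual unwind-on-partial-failure idiom: if an skb allocation fails at index e, every buffer allocated so far is freed before returning -ENOMEM. A standalone sketch of that idiom, with plain malloc()/free() standing in for ipath_alloc_skb()/dev_kfree_skb(), might be:

#include <errno.h>
#include <stdlib.h>

/* Allocate count buffers of size bytes; on failure, undo the partial work. */
static int alloc_all(void **bufs, unsigned int count, size_t size)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			while (i != 0)		/* free only what succeeded */
				free(bufs[--i]);
			return -ENOMEM;
		}
	}
	return 0;
}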
-static int bringup_link(struct ipath_devdata *dd)
-{
- u64 val, ibc;
- int ret = 0;
-
- /* hold IBC in reset */
- dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
- */
- val = (dd->ipath_ibmaxlen >> 2) + 1;
- ibc = val << dd->ibcc_mpl_shift;
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
- /*
- * How often flowctrl sent. More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
- /* max error tolerance */
- ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
- /* use "real" buffer space for */
- ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
- /* IB credit flow control. */
- ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
- /* initially come up waiting for TS1, without sending anything. */
- dd->ipath_ibcctrl = ibc;
- /*
- * Want to start out with both LINKCMD and LINKINITCMD in NOP
- * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
- * to stay a NOP. Flag that we are disabled, for the (unlikely)
- * case that some recovery path is trying to bring the link up
- * before we are ready.
- */
- ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
- INFINIPATH_IBCC_LINKINITCMD_SHIFT;
- dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
- ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
- (unsigned long long) ibc);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
-
-	/* be sure chip saw it */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
- ret = dd->ipath_f_bringup_serdes(dd);
-
- if (ret)
- dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
- "not usable\n");
- else {
- /* enable IBC */
- dd->ipath_control |= INFINIPATH_C_LINKENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
- }
-
- return ret;
-}
-
-static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
-{
- struct ipath_portdata *pd;
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (pd) {
- pd->port_dd = dd;
- pd->port_cnt = 1;
- /* The port 0 pkey table is used by the layer interface. */
- pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
- pd->port_seq_cnt = 1;
- }
- return pd;
-}
-
-static int init_chip_first(struct ipath_devdata *dd)
-{
- struct ipath_portdata *pd;
- int ret = 0;
- u64 val;
-
- spin_lock_init(&dd->ipath_kernel_tid_lock);
- spin_lock_init(&dd->ipath_user_tid_lock);
- spin_lock_init(&dd->ipath_sendctrl_lock);
- spin_lock_init(&dd->ipath_uctxt_lock);
- spin_lock_init(&dd->ipath_sdma_lock);
- spin_lock_init(&dd->ipath_gpio_lock);
- spin_lock_init(&dd->ipath_eep_st_lock);
- spin_lock_init(&dd->ipath_sdepb_lock);
- mutex_init(&dd->ipath_eep_lock);
-
- /*
- * skip cfgports stuff because we are not allocating memory,
- * and we don't want problems if the portcnt changed due to
- * cfgports. We do still check and report a difference, if
- * not same (should be impossible).
- */
- dd->ipath_f_config_ports(dd, ipath_cfgports);
- if (!ipath_cfgports)
- dd->ipath_cfgports = dd->ipath_portcnt;
- else if (ipath_cfgports <= dd->ipath_portcnt) {
- dd->ipath_cfgports = ipath_cfgports;
- ipath_dbg("Configured to use %u ports out of %u in chip\n",
- dd->ipath_cfgports, ipath_read_kreg32(dd,
- dd->ipath_kregs->kr_portcnt));
- } else {
- dd->ipath_cfgports = dd->ipath_portcnt;
- ipath_dbg("Tried to configured to use %u ports; chip "
- "only supports %u\n", ipath_cfgports,
- ipath_read_kreg32(dd,
- dd->ipath_kregs->kr_portcnt));
- }
- /*
- * Allocate full portcnt array, rather than just cfgports, because
- * cleanup iterates across all possible ports.
- */
- dd->ipath_pd = kcalloc(dd->ipath_portcnt, sizeof(*dd->ipath_pd),
- GFP_KERNEL);
-
- if (!dd->ipath_pd) {
- ipath_dev_err(dd, "Unable to allocate portdata array, "
- "failing\n");
- ret = -ENOMEM;
- goto done;
- }
-
- pd = create_portdata0(dd);
- if (!pd) {
- ipath_dev_err(dd, "Unable to allocate portdata for port "
- "0, failing\n");
- ret = -ENOMEM;
- goto done;
- }
- dd->ipath_pd[0] = pd;
-
- dd->ipath_rcvtidcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
- dd->ipath_rcvtidbase =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
- dd->ipath_rcvegrcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
- dd->ipath_rcvegrbase =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
- dd->ipath_palign =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
- dd->ipath_piobufbase =
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
- dd->ipath_piosize2k = val & ~0U;
- dd->ipath_piosize4k = val >> 32;
- if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
- ipath_mtu4096 = 0; /* 4KB not supported by this chip */
- dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
- dd->ipath_piobcnt2k = val & ~0U;
- dd->ipath_piobcnt4k = val >> 32;
- dd->ipath_pio2kbase =
- (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
- (dd->ipath_piobufbase & 0xffffffff));
- if (dd->ipath_piobcnt4k) {
- dd->ipath_pio4kbase = (u32 __iomem *)
- (((char __iomem *) dd->ipath_kregbase) +
- (dd->ipath_piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
-	 * every buf allocate
- */
- dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
- dd->ipath_palign);
- ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
- "(%x aligned)\n",
- dd->ipath_piobcnt2k, dd->ipath_piosize2k,
- dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
- dd->ipath_piosize4k, dd->ipath_pio4kbase,
- dd->ipath_4kalign);
- } else {
- ipath_dbg("%u 2k piobufs @ %p\n",
- dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
- }
-done:
- return ret;
-}
-
-/**
- * init_chip_reset - re-initialize after a reset, or enable
- * @dd: the infinipath device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receive or transmit (explicitly, in case reset
- * failed)
- */
-static int init_chip_reset(struct ipath_devdata *dd)
-{
- u32 rtmp;
- int i;
- unsigned long flags;
-
- /*
- * ensure chip does no sends or receives, tail updates, or
- * pioavail updates while we re-initialize
- */
- dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
- for (i = 0; i < dd->ipath_portcnt; i++) {
- clear_bit(dd->ipath_r_portenable_shift + i,
- &dd->ipath_rcvctrl);
- clear_bit(dd->ipath_r_intravail_shift + i,
- &dd->ipath_rcvctrl);
- }
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
-
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl = 0U; /* no sdma, etc */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
- rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
- if (rtmp != dd->ipath_rcvtidcnt)
- dev_info(&dd->pcidev->dev, "tidcnt was %u before "
- "reset, now %u, using original\n",
- dd->ipath_rcvtidcnt, rtmp);
- rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
- if (rtmp != dd->ipath_rcvtidbase)
- dev_info(&dd->pcidev->dev, "tidbase was %u before "
- "reset, now %u, using original\n",
- dd->ipath_rcvtidbase, rtmp);
- rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
- if (rtmp != dd->ipath_rcvegrcnt)
- dev_info(&dd->pcidev->dev, "egrcnt was %u before "
- "reset, now %u, using original\n",
- dd->ipath_rcvegrcnt, rtmp);
- rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
- if (rtmp != dd->ipath_rcvegrbase)
- dev_info(&dd->pcidev->dev, "egrbase was %u before "
- "reset, now %u, using original\n",
- dd->ipath_rcvegrbase, rtmp);
-
- return 0;
-}
-
-static int init_pioavailregs(struct ipath_devdata *dd)
-{
- int ret;
-
- dd->ipath_pioavailregs_dma = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
- GFP_KERNEL);
- if (!dd->ipath_pioavailregs_dma) {
- ipath_dev_err(dd, "failed to allocate PIOavail reg area "
- "in memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
- /*
- * we really want L2 cache aligned, but for current CPUs of
- * interest, they are the same.
- */
- dd->ipath_statusp = (u64 *)
- ((char *)dd->ipath_pioavailregs_dma +
- ((2 * L1_CACHE_BYTES +
- dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
- /* copy the current value now that it's really allocated */
- *dd->ipath_statusp = dd->_ipath_status;
- /*
- * setup buffer to hold freeze msg, accessible to apps,
- * following statusp
- */
- dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
- /* and its length */
- dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
-
- ret = 0;
-
-done:
- return ret;
-}
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the infinipath device
- *
- * allocate the shadow TID array, so we can ipath_munlock previous
- * entries. It may make more sense to move the pageshadow to the
- * port data structure, so we only allocate memory for ports actually
- * in use, since we are at 8k per port now.
- */
-static void init_shadow_tids(struct ipath_devdata *dd)
-{
- struct page **pages;
- dma_addr_t *addrs;
-
- pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
- sizeof(struct page *));
- if (!pages) {
- ipath_dev_err(dd, "failed to allocate shadow page * "
- "array, no expected sends!\n");
- dd->ipath_pageshadow = NULL;
- return;
- }
-
- addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
- sizeof(dma_addr_t));
- if (!addrs) {
- ipath_dev_err(dd, "failed to allocate shadow dma handle "
- "array, no expected sends!\n");
- vfree(pages);
- dd->ipath_pageshadow = NULL;
- return;
- }
-
- dd->ipath_pageshadow = pages;
- dd->ipath_physshadow = addrs;
-}
-
-static void enable_chip(struct ipath_devdata *dd, int reinit)
-{
- u32 val;
- u64 rcvmask;
- unsigned long flags;
- int i;
-
- if (!reinit)
- init_waitqueue_head(&ipath_state_wait);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
-
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- /* Enable PIO send, and update of PIOavail regs to memory. */
- dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
- INFINIPATH_S_PIOBUFAVAILUPD;
-
- /*
- * Set the PIO avail update threshold to host memory
- * on chips that support it.
- */
- if (dd->ipath_pioupd_thresh)
- dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
- << INFINIPATH_S_UPDTHRESH_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- /*
- * Enable kernel ports' receive and receive interrupt.
- * Other ports done as user opens and inits them.
- */
- rcvmask = 1ULL;
- dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
- (rcvmask << dd->ipath_r_intravail_shift);
- if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
- dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
-
- /*
- * now ready for use. this should be cleared whenever we
- * detect a reset, or initiate one.
- */
- dd->ipath_flags |= IPATH_INITTED;
-
- /*
- * Init our shadow copies of head from tail values,
- * and write head values to match.
- */
- val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
- ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
-
- /* Initialize so we interrupt on next packet received */
- ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off |
- dd->ipath_pd[0]->port_head, 0);
-
- /*
- * by now pioavail updates to memory should have occurred, so
- * copy them into our working/shadow registers; this is in
- * case something went wrong with abort, but mostly to get the
- * initial values of the generation bit correct.
- */
- for (i = 0; i < dd->ipath_pioavregs; i++) {
- __le64 pioavail;
-
- /*
- * Chip Errata bug 6641; even and odd qwords>3 are swapped.
- */
- if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
- pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
- else
- pioavail = dd->ipath_pioavailregs_dma[i];
- /*
- * don't need to worry about ipath_pioavailkernel here
- * because we will call ipath_chg_pioavailkernel() later
- * in initialization, to busy out buffers as needed
- */
- dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
- }
- /* can get counters, stats, etc. */
- dd->ipath_flags |= IPATH_PRESENT;
-}
-
-static int init_housekeeping(struct ipath_devdata *dd, int reinit)
-{
- char boardn[40];
- int ret = 0;
-
- /*
- * have to clear shadow copies of registers at init that are
- * not otherwise set here, or all kinds of bizarre things
- * happen with driver on chip reset
- */
- dd->ipath_rcvhdrsize = 0;
-
- /*
- * Don't clear ipath_flags as 8bit mode was set before
- * entering this func. However, we do set the linkstate to
- * unknown, so we can watch for a transition.
- * PRESENT is set because we want register reads to work,
- * and the kernel infrastructure saw it in config space;
- * We clear it if we have failures.
- */
- dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
- dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
- IPATH_LINKDOWN | IPATH_LINKINIT);
-
- ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
- dd->ipath_revision =
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-
- /*
- * set up fundamental info we need to use the chip; we assume
- * if the revision reg and these regs are OK, we don't need to
- * special case the rest
- */
- dd->ipath_sregbase =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
- dd->ipath_cregbase =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
- dd->ipath_uregbase =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
- ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
- "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
- dd->ipath_uregbase, dd->ipath_cregbase);
- if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
- || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
- || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
- || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
- ipath_dev_err(dd, "Register read failures from chip, "
- "giving up initialization\n");
- dd->ipath_flags &= ~IPATH_PRESENT;
- ret = -ENODEV;
- goto done;
- }
-
-
- /* clear diagctrl register, in case diags were running and crashed */
- ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
-
- /* clear the initial reset flag, in case first driver load */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
- INFINIPATH_E_RESET);
-
- ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
- (unsigned long long) dd->ipath_revision,
- dd->ipath_pcirev);
-
- if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
- INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
- ipath_dev_err(dd, "Driver only handles version %d, "
- "chip swversion is %d (%llx), failng\n",
- IPATH_CHIP_SWVERSION,
- (int)(dd->ipath_revision >>
- INFINIPATH_R_SOFTWARE_SHIFT) &
- INFINIPATH_R_SOFTWARE_MASK,
- (unsigned long long) dd->ipath_revision);
- ret = -ENOSYS;
- goto done;
- }
- dd->ipath_majrev = (u8) ((dd->ipath_revision >>
- INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
- INFINIPATH_R_CHIPREVMAJOR_MASK);
- dd->ipath_minrev = (u8) ((dd->ipath_revision >>
- INFINIPATH_R_CHIPREVMINOR_SHIFT) &
- INFINIPATH_R_CHIPREVMINOR_MASK);
- dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
- INFINIPATH_R_BOARDID_SHIFT) &
- INFINIPATH_R_BOARDID_MASK);
-
- ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
-
- snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
- "SW Compat %u\n",
- IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
- (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
- INFINIPATH_R_ARCH_MASK,
- dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
- (unsigned)(dd->ipath_revision >>
- INFINIPATH_R_SOFTWARE_SHIFT) &
- INFINIPATH_R_SOFTWARE_MASK);
-
- ipath_dbg("%s", dd->ipath_boardversion);
-
- if (ret)
- goto done;
-
- if (reinit)
- ret = init_chip_reset(dd);
- else
- ret = init_chip_first(dd);
-
-done:
- return ret;
-}
-
-static void verify_interrupt(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-
- if (!dd)
- return; /* being torn down */
-
- /*
- * If we don't have any interrupts, let the user know and
- * don't bother checking again.
- */
- if (dd->ipath_int_counter == 0) {
- if (!dd->ipath_f_intr_fallback(dd))
- dev_err(&dd->pcidev->dev, "No interrupts detected, "
- "not usable.\n");
- else /* re-arm the timer to see if fallback works */
- mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
- } else
- ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
- dd->ipath_int_counter);
-}
-
-/**
- * ipath_init_chip - do the actual initialization sequence on the chip
- * @dd: the infinipath device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip. This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0). We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers
- * TIDs, etc. after the reset or enable has completed.
- */
-int ipath_init_chip(struct ipath_devdata *dd, int reinit)
-{
- int ret = 0;
- u32 kpiobufs, defkbufs;
- u32 piobufs, uports;
- u64 val;
- struct ipath_portdata *pd;
- gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-
- ret = init_housekeeping(dd, reinit);
- if (ret)
- goto done;
-
- /*
- * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
- * but then it no longer nicely fits power of two, and since
- * we now use routines that backend onto __get_free_pages, the
- * rest would be wasted.
- */
- dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
- dd->ipath_rcvhdrcnt);
-
- /*
- * Set up the shadow copies of the piobufavail registers,
- * which we compare against the chip registers for now, and
- * the in memory DMA'ed copies of the registers. This has to
- * be done early, before we calculate lastport, etc.
- */
- piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
- /*
- * calc number of pioavail registers, and save it; we have 2
- * bits per buffer.
- */
- dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
- / (sizeof(u64) * BITS_PER_BYTE / 2);
- uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
- if (piobufs > 144)
- defkbufs = 32 + dd->ipath_pioreserved;
- else
- defkbufs = 16 + dd->ipath_pioreserved;
-
- if (ipath_kpiobufs && (ipath_kpiobufs +
- (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
- int i = (int) piobufs -
- (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
- if (i < 1)
- i = 1;
- dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
- "%d for kernel leaves too few for %d user ports "
- "(%d each); using %u\n", ipath_kpiobufs,
- piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
- /*
- * shouldn't change ipath_kpiobufs, because could be
- * different for different devices...
- */
- kpiobufs = i;
- } else if (ipath_kpiobufs)
- kpiobufs = ipath_kpiobufs;
- else
- kpiobufs = defkbufs;
- dd->ipath_lastport_piobuf = piobufs - kpiobufs;
- dd->ipath_pbufsport =
- uports ? dd->ipath_lastport_piobuf / uports : 0;
- /* if not an even divisor, some user ports get extra buffers */
- dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
- (dd->ipath_pbufsport * uports);
- if (dd->ipath_ports_extrabuf)
- ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
- "ports <= %u\n", dd->ipath_pbufsport,
- dd->ipath_ports_extrabuf);
- dd->ipath_lastpioindex = 0;
- dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
- /* ipath_pioavailshadow initialized earlier */
- ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
- "each for %u user ports\n", kpiobufs,
- piobufs, dd->ipath_pbufsport, uports);
- ret = dd->ipath_f_early_init(dd);
- if (ret) {
- ipath_dev_err(dd, "Early initialization failure\n");
- goto done;
- }
-
- /*
- * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
- * done after early_init.
- */
- dd->ipath_hdrqlast =
- dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
- dd->ipath_rcvhdrentsize);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
- dd->ipath_rcvhdrsize);
-
- if (!reinit) {
- ret = init_pioavailregs(dd);
- init_shadow_tids(dd);
- if (ret)
- goto done;
- }
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
- dd->ipath_pioavailregs_phys);
-
- /*
- * this is to detect s/w errors, which the h/w works around by
- * ignoring the low 6 bits of address, if it wasn't aligned.
- */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
- if (val != dd->ipath_pioavailregs_phys) {
- ipath_dev_err(dd, "Catastrophic software error, "
- "SendPIOAvailAddr written as %lx, "
- "read back as %llx\n",
- (unsigned long) dd->ipath_pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- goto done;
- }
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
-
- /*
- * make sure we are not in freeze, and PIO send enabled, so
- * writes to pbc happen
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
- /*
- * before error clears, since we expect serdes pll errors during
- * this, the first time after reset
- */
- if (bringup_link(dd)) {
- dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
- ret = -ENETDOWN;
- goto done;
- }
-
- /*
- * clear any "expected" hwerrs from reset and/or initialization
- * clear any that aren't enabled (at least this once), and then
- * set the enable mask
- */
- dd->ipath_f_init_hwerrors(dd);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
-
- /* clear all */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
- /* enable errors that are masked, at least this first time. */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- ~dd->ipath_maskederrs);
- dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
- dd->ipath_errormask =
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
- /* clear any interrupts up to this point (ints still not enabled) */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
- dd->ipath_f_tidtemplate(dd);
-
- /*
- * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
- * re-init, the simplest way to handle this is to free
- * existing, and re-allocate.
- * Need to re-create rest of port 0 portdata as well.
- */
- pd = dd->ipath_pd[0];
- if (reinit) {
- struct ipath_portdata *npd;
-
- /*
- * Alloc and init new ipath_portdata for port0,
- * Then free old pd. Could lead to fragmentation, but also
- * makes later support for hot-swap easier.
- */
- npd = create_portdata0(dd);
- if (npd) {
- ipath_free_pddata(dd, pd);
- dd->ipath_pd[0] = npd;
- pd = npd;
- } else {
- ipath_dev_err(dd, "Unable to allocate portdata"
- " for port 0, failing\n");
- ret = -ENOMEM;
- goto done;
- }
- }
- ret = ipath_create_rcvhdrq(dd, pd);
- if (!ret)
- ret = create_port0_egr(dd);
- if (ret) {
- ipath_dev_err(dd, "failed to allocate kernel port's "
- "rcvhdrq and/or egr bufs\n");
- goto done;
- } else {
- enable_chip(dd, reinit);
- }
-
- /* after enable_chip, so pioavailshadow setup */
- ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
-
- /*
- * Cancel any possible active sends from early driver load.
- * Follows early_init because some chips have to initialize
- * PIO buffers in early_init to avoid false parity errors.
- * After enable and ipath_chg_pioavailkernel so we can safely
- * enable pioavail updates and PIOENABLE; packets are now
- * ready to go out.
- */
- ipath_cancel_sends(dd, 1);
-
- if (!reinit) {
- /*
- * Used when we close a port, for DMA already in flight
- * at close.
- */
- dd->ipath_dummy_hdrq = dma_alloc_coherent(
- &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
- &dd->ipath_dummy_hdrq_phys,
- gfp_flags);
- if (!dd->ipath_dummy_hdrq) {
- dev_info(&dd->pcidev->dev,
- "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
- dd->ipath_pd[0]->port_rcvhdrq_size);
- /* fallback to just 0'ing */
- dd->ipath_dummy_hdrq_phys = 0UL;
- }
- }
-
- /*
- * cause retrigger of pending interrupts ignored during init,
- * even if we had errors
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-
- if (!dd->ipath_stats_timer_active) {
- /*
- * first init, or after an admin disable/enable
- * set up stats retrieval timer, even if we had errors
- * in last portion of setup
- */
- setup_timer(&dd->ipath_stats_timer, ipath_get_faststats,
- (unsigned long)dd);
- /* every 5 seconds; */
- dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
-	/* takes ~16 seconds to overflow at full IB 4x bandwidth */
- add_timer(&dd->ipath_stats_timer);
- dd->ipath_stats_timer_active = 1;
- }
-
- /* Set up SendDMA if chip supports it */
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- ret = setup_sdma(dd);
-
- /* Set up HoL state */
- setup_timer(&dd->ipath_hol_timer, ipath_hol_event, (unsigned long)dd);
-
- dd->ipath_hol_state = IPATH_HOL_UP;
-
-done:
- if (!ret) {
- *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
- if (!dd->ipath_f_intrsetup(dd)) {
- /* now we can enable all interrupts from the chip */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
- -1LL);
- /* force re-interrupt of any pending interrupts. */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
- 0ULL);
- /* chip is usable; mark it as initialized */
- *dd->ipath_statusp |= IPATH_STATUS_INITTED;
-
- /*
- * setup to verify we get an interrupt, and fallback
- * to an alternate if necessary and possible
- */
- if (!reinit) {
- setup_timer(&dd->ipath_intrchk_timer,
- verify_interrupt,
- (unsigned long)dd);
- }
- dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
- add_timer(&dd->ipath_intrchk_timer);
- } else
- ipath_dev_err(dd, "No interrupts enabled, couldn't "
- "setup interrupt address\n");
-
- if (dd->ipath_cfgports > ipath_stats.sps_nports)
- /*
- * sps_nports is a global, so, we set it to
- * the highest number of ports of any of the
- * chips we find; we never decrement it, at
- * least for now. Since this might have changed
- * over disable/enable or prior to reset, always
- * do the check and potentially adjust.
- */
- ipath_stats.sps_nports = dd->ipath_cfgports;
- } else
- ipath_dbg("Failed (%d) to initialize chip\n", ret);
-
- /* if ret is non-zero, we probably should do some cleanup
- here... */
- return ret;
-}
-
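
One calculation in ipath_init_chip() above is worth spelling out: the pioavail shadow keeps 2 status bits per PIO buffer, so a 64-bit register covers sizeof(u64) * BITS_PER_BYTE / 2 = 32 buffers, and the buffer total is rounded up to a multiple of 32 before dividing. With illustrative counts (not taken from a real chip) of 128 2k buffers and 16 4k buffers:

	piobufs        = 128 + 16 = 144
	ALIGN(144, 32) = 160
	pioavregs      = 160 / 32 = 5 pioavail registers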
-static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
-{
- struct ipath_devdata *dd;
- unsigned long flags;
- unsigned short val;
- int ret;
-
- ret = ipath_parse_ushort(str, &val);
-
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- if (ret < 0)
- goto bail;
-
- if (val == 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
- if (dd->ipath_kregbase)
- continue;
- if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
- (dd->ipath_cfgports *
- IPATH_MIN_USER_PORT_BUFCNT)))
- {
- ipath_dev_err(
- dd,
- "Allocating %d PIO bufs for kernel leaves "
- "too few for %d user ports (%d each)\n",
- val, dd->ipath_cfgports - 1,
- IPATH_MIN_USER_PORT_BUFCNT);
- ret = -EINVAL;
- goto bail;
- }
- dd->ipath_lastport_piobuf =
- dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
- }
-
- ipath_kpiobufs = val;
- ret = 0;
-bail:
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
- return ret;
-}
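
Because kpiobufs is registered earlier in this file with module_param_call() and S_IWUSR | S_IRUGO, the setter above runs both for a load-time option and for a root write to the parameter's sysfs file under /sys/module/<module>/parameters/, so the per-device validation applies in either case. A stripped-down sketch of the same registration shape, with a hypothetical parameter and no device walk, keeping the older non-const setter signature this file uses (newer kernels pass a const struct kernel_param *):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static ushort nbufs;	/* hypothetical parameter, analogous to kpiobufs */

/* Validate, then commit; reject zero just as ipath_set_kpiobufs() does. */
static int nbufs_set(const char *val, struct kernel_param *kp)
{
	ushort v;
	int ret = kstrtou16(val, 0, &v);

	if (ret)
		return ret;
	if (!v)
		return -EINVAL;
	nbufs = v;
	return 0;
}

module_param_call(nbufs, nbufs_set, param_get_ushort, &nbufs,
		  S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(nbufs, "Number of buffers reserved for the driver");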
diff --git a/drivers/staging/rdma/ipath/ipath_intr.c b/drivers/staging/rdma/ipath/ipath_intr.c
deleted file mode 100644
index 0403fa28ed8d..000000000000
--- a/drivers/staging/rdma/ipath/ipath_intr.c
+++ /dev/null
@@ -1,1271 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
-{
- u32 piobcnt;
- unsigned long sbuf[4];
- /*
- * it's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling
- */
- piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
- /* read these before writing errorclear */
- sbuf[0] = ipath_read_kreg64(
- dd, dd->ipath_kregs->kr_sendbuffererror);
- sbuf[1] = ipath_read_kreg64(
- dd, dd->ipath_kregs->kr_sendbuffererror + 1);
- if (piobcnt > 128)
- sbuf[2] = ipath_read_kreg64(
- dd, dd->ipath_kregs->kr_sendbuffererror + 2);
- if (piobcnt > 192)
- sbuf[3] = ipath_read_kreg64(
- dd, dd->ipath_kregs->kr_sendbuffererror + 3);
- else
- sbuf[3] = 0;
-
- if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
- int i;
- if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
- time_after(dd->ipath_lastcancel, jiffies)) {
- __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
- "SendbufErrs %lx %lx", sbuf[0],
- sbuf[1]);
- if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
- printk(" %lx %lx ", sbuf[2], sbuf[3]);
- printk("\n");
- }
-
- for (i = 0; i < piobcnt; i++)
- if (test_bit(i, sbuf))
- ipath_disarm_piobufs(dd, i, 1);
- /* ignore armlaunch errs for a bit */
- dd->ipath_lastcancel = jiffies+3;
- }
-}
-
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
- INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
- INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
- INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
- INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
- INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
- INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
- INFINIPATH_E_INVALIDADDR)
-
-/*
- * this is similar to E_SUM_ERRS, but can't ignore armlaunch, don't ignore
- * errors not related to freeze and cancelling buffers. Can't ignore
- * armlaunch because could get more while still cleaning up, and need
- * to cancel those as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
- INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
- INFINIPATH_E_SPKTLEN)
-
-/*
- * these are errors that can occur when the link changes state while
- * a packet is being sent or received. This doesn't cover things
- * like EBP or VCRC that can be the result of the link changing state
- * while a send is in flight, so we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
- INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
- INFINIPATH_E_RUNEXPCHAR)
-
-static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
-{
- u64 ignore_this_time = 0;
-
- ipath_disarm_senderrbufs(dd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(dd->ipath_flags & IPATH_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ipath_dbg("Ignoring packet errors %llx, because link not "
- "ACTIVE\n", (unsigned long long) errs);
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- return ignore_this_time;
-}
-
-/* generic hw error messages... */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
- { \
- .mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a << \
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ), \
- .msg = "TXE " #a " Memory Parity" \
- }
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
- { \
- .mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a << \
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ), \
- .msg = "RXE " #a " Memory Parity" \
- }
-
-static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
- INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),
-
- INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
- INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
- INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),
-
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
- INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
-};
-
-/**
- * ipath_format_hwmsg - format a single hwerror message
- * @msg message buffer
- * @msgl length of message buffer
- * @hwmsg message to add to message buffer
- */
-static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * ipath_format_hwerrors - format hardware error messages for display
- * @hwerrs hardware errors bit vector
- * @hwerrmsgs hardware error descriptions
- * @nhwerrmsgs number of hwerrmsgs
- * @msg message buffer
- * @msgl message buffer length
- */
-void ipath_format_hwerrors(u64 hwerrs,
- const struct ipath_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs,
- char *msg, size_t msgl)
-{
- int i;
- const int glen =
- ARRAY_SIZE(ipath_generic_hwerror_msgs);
-
- for (i=0; i<glen; i++) {
- if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
- ipath_format_hwmsg(msg, msgl,
- ipath_generic_hwerror_msgs[i].msg);
- }
- }
-
- for (i=0; i<nhwerrmsgs; i++) {
- if (hwerrs & hwerrmsgs[i].mask) {
- ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
- }
- }
-}
-
-/* return the strings for the most common link states */
-static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
- char *ret;
- u32 state;
-
- state = ipath_ib_state(dd, ibcs);
- if (state == dd->ib_init)
- ret = "Init";
- else if (state == dd->ib_arm)
- ret = "Arm";
- else if (state == dd->ib_active)
- ret = "Active";
- else
- ret = "Down";
- return ret;
-}
-
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
-{
- struct ib_event event;
-
- event.device = &dd->verbs_dev->ibdev;
- event.element.port_num = 1;
- event.event = ev;
- ib_dispatch_event(&event);
-}
-
-static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
- ipath_err_t errs)
-{
- u32 ltstate, lstate, ibstate, lastlstate;
- u32 init = dd->ib_init;
- u32 arm = dd->ib_arm;
- u32 active = dd->ib_active;
- const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-
- lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
- ibstate = ipath_ib_state(dd, ibcs);
- /* linkstate at last interrupt */
- lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-	ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingstate */
-
- /*
- * Since going into a recovery state causes the link state to go
- * down and since recovery is transitory, it is better if we "miss"
- * ever seeing the link training state go into recovery (i.e.,
- * ignore this transition for link state special handling purposes)
- * without even updating ipath_lastibcstat.
- */
- if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
- (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
- (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
- goto done;
-
- /*
- * if linkstate transitions into INIT from any of the various down
- * states, or if it transitions from any of the up (INIT or better)
- * states into any of the down states (except link recovery), then
- * call the chip-specific code to take appropriate actions.
- */
- if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
- lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
- /* transitioned to UP */
- if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
- /* link came up, so we must no longer be disabled */
- dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
- ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
- goto skip_ibchange; /* chip-code handled */
- }
- } else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
- (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
- ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
- ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
- int handled;
- handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
- dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
- if (handled) {
- ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
- goto skip_ibchange; /* chip-code handled */
- }
- }
-
- /*
- * Significant enough to always print and get into logs, if it was
- * unexpected. If it was a requested state change, we'll have
- * already cleared the flags, so we won't print this warning
- */
- if ((ibstate != arm && ibstate != active) &&
- (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
- dev_info(&dd->pcidev->dev, "Link state changed from %s "
- "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
- "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
- }
-
- if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
- ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
- u32 lastlts;
- lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
- /*
- * Ignore cycling back and forth from Polling.Active to
- * Polling.Quiet while waiting for the other end of the link
- * to come up, except to try and decide if we are connected
- * to a live IB device or not. We will cycle back and
- * forth between them if no cable is plugged in, the other
- * device is powered off or disabled, etc.
- */
- if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
- lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
- (++dd->ipath_ibpollcnt == 40)) {
- dd->ipath_flags |= IPATH_NOCABLE;
- *dd->ipath_statusp |=
- IPATH_STATUS_IB_NOCABLE;
- ipath_cdbg(LINKVERB, "Set NOCABLE\n");
- }
- ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
- ipath_ibcstatus_str[ltstate], ibstate);
- goto skip_ibchange;
- }
- }
-
- dd->ipath_ibpollcnt = 0; /* not poll*, now */
- ipath_stats.sps_iblink++;
-
- if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
- u64 linkrecov;
- linkrecov = ipath_snap_cntr(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (linkrecov != dd->ipath_lastlinkrecov) {
- ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
- (unsigned long long) ibcs,
- ib_linkstate(dd, ibcs),
- ipath_ibcstatus_str[ltstate],
- (unsigned long long) linkrecov);
- /* and no more until active again */
- dd->ipath_lastlinkrecov = 0;
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- goto skip_ibchange;
- }
- }
-
- if (ibstate == init || ibstate == arm || ibstate == active) {
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
- if (ibstate == init || ibstate == arm) {
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- if (dd->ipath_flags & IPATH_LINKACTIVE)
- signal_ib_event(dd, IB_EVENT_PORT_ERR);
- }
- if (ibstate == arm) {
- dd->ipath_flags |= IPATH_LINKARMED;
- dd->ipath_flags &= ~(IPATH_LINKUNK |
- IPATH_LINKINIT | IPATH_LINKDOWN |
- IPATH_LINKACTIVE | IPATH_NOCABLE);
- ipath_hol_down(dd);
- } else if (ibstate == init) {
- /*
- * set INIT and DOWN. Down is checked by
- * most of the other code, but INIT is
- * useful to know in a few places.
- */
- dd->ipath_flags |= IPATH_LINKINIT |
- IPATH_LINKDOWN;
- dd->ipath_flags &= ~(IPATH_LINKUNK |
- IPATH_LINKARMED | IPATH_LINKACTIVE |
- IPATH_NOCABLE);
- ipath_hol_down(dd);
- } else { /* active */
- dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- *dd->ipath_statusp |=
- IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
- dd->ipath_flags |= IPATH_LINKACTIVE;
- dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
- | IPATH_LINKDOWN | IPATH_LINKARMED |
- IPATH_NOCABLE);
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- ipath_restart_sdma(dd);
- signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
- /* LED active not handled in chip _f_updown */
- dd->ipath_f_setextled(dd, lstate, ltstate);
- ipath_hol_up(dd);
- }
-
- /*
- * print after we've already done the work, so as not to
- * delay the state changes and notifications, for debugging
- */
- if (lstate == lastlstate)
- ipath_cdbg(LINKVERB, "Unchanged from last: %s "
- "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
- else
- ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
- dd->ipath_unit, ib_linkstate(dd, ibcs),
- ipath_ibcstatus_str[ltstate], ibstate);
- } else { /* down */
- if (dd->ipath_flags & IPATH_LINKACTIVE)
- signal_ib_event(dd, IB_EVENT_PORT_ERR);
- dd->ipath_flags |= IPATH_LINKDOWN;
- dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
- | IPATH_LINKACTIVE |
- IPATH_LINKARMED);
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- dd->ipath_lli_counter = 0;
-
- if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
- ipath_cdbg(VERBOSE, "Unit %u link state down "
- "(state 0x%x), from %s\n",
- dd->ipath_unit, lstate,
- ib_linkstate(dd, dd->ipath_lastibcstat));
- else
- ipath_cdbg(LINKVERB, "Unit %u link state changed "
- "to %s (0x%x) from down (%x)\n",
- dd->ipath_unit,
- ipath_ibcstatus_str[ltstate],
- ibstate, lastlstate);
- }
-
-skip_ibchange:
- dd->ipath_lastibcstat = ibcs;
-done:
- return;
-}
-
-static void handle_supp_msgs(struct ipath_devdata *dd,
- unsigned supp_msgs, char *msg, u32 msgsz)
-{
- /*
- * Print the message unless it's ibc status change only, which
- * happens so often we never want to count it.
- */
- if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
- int iserr;
- ipath_err_t mask;
- iserr = ipath_decode_err(dd, msg, msgsz,
- dd->ipath_lasterror &
- ~INFINIPATH_E_IBSTATUSCHANGED);
-
- mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
-
- /* if we're in debug, then don't mask SDMADISABLED msgs */
- if (ipath_debug & __IPATH_DBG)
- mask &= ~INFINIPATH_E_SDMADISABLED;
-
- if (dd->ipath_lasterror & ~mask)
- ipath_dev_err(dd, "Suppressed %u messages for "
- "fast-repeating errors (%s) (%llx)\n",
- supp_msgs, msg,
- (unsigned long long)
- dd->ipath_lasterror);
- else {
- /*
- * rcvegrfull and rcvhdrqfull are "normal", for some
- * types of processes (mostly benchmarks) that send
- * huge numbers of messages, while not processing
- * them. So only complain about these at debug
- * level.
- */
- if (iserr)
- ipath_dbg("Suppressed %u messages for %s\n",
- supp_msgs, msg);
- else
- ipath_cdbg(ERRPKT,
- "Suppressed %u messages for %s\n",
- supp_msgs, msg);
- }
- }
-}
-
-static unsigned handle_frequent_errors(struct ipath_devdata *dd,
- ipath_err_t errs, char *msg,
- u32 msgsz, int *noprint)
-{
- unsigned long nc;
- static unsigned long nextmsg_time;
- static unsigned nmsgs, supp_msgs;
-
- /*
- * Throttle back "fast" messages to no more than 10 per 5 seconds.
- * This isn't perfect, but it's a reasonable heuristic. If we get
- * more than 10, give a 6x longer delay.
- */
- nc = jiffies;
- if (nmsgs > 10) {
- if (time_before(nc, nextmsg_time)) {
- *noprint = 1;
- if (!supp_msgs++)
- nextmsg_time = nc + HZ * 3;
- } else if (supp_msgs) {
- handle_supp_msgs(dd, supp_msgs, msg, msgsz);
- supp_msgs = 0;
- nmsgs = 0;
- }
- } else if (!nmsgs++ || time_after(nc, nextmsg_time)) {
- nextmsg_time = nc + HZ / 2;
- }
-
- return supp_msgs;
-}
-
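
handle_frequent_errors() above is a rate limiter for the error path: it lets an initial burst of messages through, then suppresses and counts the rest, and handle_supp_msgs() prints a summary once the quiet period expires. A rough sketch of the throttling idea alone, with illustrative thresholds and without the deferred-summary bookkeeping:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Allow roughly ten messages up front, then at most one per half second. */
static bool may_print(void)
{
	static unsigned long quiet_until;
	static unsigned int count;

	if (count > 10 && time_before(jiffies, quiet_until))
		return false;			/* still inside the quiet period */

	if (!count++ || time_after(jiffies, quiet_until))
		quiet_until = jiffies + HZ / 2;	/* start the next quiet period */

	return true;
}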
-static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
- unsigned long flags;
- int expected;
-
- if (ipath_debug & __IPATH_DBG) {
- char msg[128];
- ipath_decode_err(dd, msg, sizeof msg, errs &
- INFINIPATH_E_SDMAERRS);
- ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
- }
- if (ipath_debug & __IPATH_VERBDBG) {
- unsigned long tl, hd, status, lengen;
- tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
- hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
- status = ipath_read_kreg64(dd
- , dd->ipath_kregs->kr_senddmastatus);
- lengen = ipath_read_kreg64(dd,
- dd->ipath_kregs->kr_senddmalengen);
- ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
- "lengen 0x%lx\n", tl, hd, status, lengen);
- }
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
- expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
- if (!expected)
- ipath_cancel_sends(dd, 1);
-}
-
-static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
-{
- unsigned long flags;
- int expected;
-
- if ((istat & INFINIPATH_I_SDMAINT) &&
- !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- ipath_sdma_intr(dd);
-
- if (istat & INFINIPATH_I_SDMADISABLED) {
- expected = test_bit(IPATH_SDMA_ABORTING,
- &dd->ipath_sdma_status);
- ipath_dbg("%s SDmaDisabled intr\n",
- expected ? "expected" : "unexpected");
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
- if (!expected)
- ipath_cancel_sends(dd, 1);
- if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
- }
-}
-
-static int handle_hdrq_full(struct ipath_devdata *dd)
-{
- int chkerrpkts = 0;
- u32 hd, tl;
- u32 i;
-
- ipath_stats.sps_hdrqfull++;
- for (i = 0; i < dd->ipath_cfgports; i++) {
- struct ipath_portdata *pd = dd->ipath_pd[i];
-
- if (i == 0) {
- /*
- * For kernel receive queues, we just want to know
- * if there are packets in the queue that we can
- * process.
- */
- if (pd->port_head != ipath_get_hdrqtail(pd))
- chkerrpkts |= 1 << i;
- continue;
- }
-
- /* Skip if user context is not open */
- if (!pd || !pd->port_cnt)
- continue;
-
- /* Don't report the same point multiple times. */
- if (dd->ipath_flags & IPATH_NODMA_RTAIL)
- tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
- else
- tl = ipath_get_rcvhdrtail(pd);
- if (tl == pd->port_lastrcvhdrqtail)
- continue;
-
- hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
- if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
- pd->port_lastrcvhdrqtail = tl;
- pd->port_hdrqfull++;
- /* flush hdrqfull so that poll() sees it */
- wmb();
- wake_up_interruptible(&pd->port_wait);
- }
- }
-
- return chkerrpkts;
-}
-
-static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
- char msg[128];
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- int chkerrpkts = 0, noprint = 0;
- unsigned supp_msgs;
- int log_idx;
-
- /*
- * don't report errors that are masked, either at init
- * (not set in ipath_errormask), or temporarily (set in
- * ipath_maskederrs)
- */
- errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
-
- supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
- &noprint);
-
- /* do these first, they are most important */
- if (errs & INFINIPATH_E_HARDWARE) {
- /* reuse same msg buf */
- dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
- } else {
- u64 mask;
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
- mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
- if (errs & mask)
- ipath_inc_eeprom_err(dd, log_idx, 1);
- }
- }
-
- if (errs & INFINIPATH_E_SDMAERRS)
- handle_sdma_errors(dd, errs);
-
- if (!noprint && (errs & ~dd->ipath_e_bitsextant))
- ipath_dev_err(dd, "error interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (errs & ~dd->ipath_e_bitsextant));
-
- if (errs & E_SUM_ERRS)
- ignore_this_time = handle_e_sum_errs(dd, errs);
- else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(dd->ipath_flags & IPATH_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ipath_dbg("Ignoring packet errors %llx, because link not "
- "ACTIVE\n", (unsigned long long) errs);
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- if (supp_msgs == 250000) {
- int s_iserr;
- /*
-		 * It's not entirely reasonable to assume that the errors set
-		 * in the last clear period are all responsible for the
-		 * problem, but the alternative is to assume it's only the
-		 * ones on this particular interrupt, which also isn't great
- */
- dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
-
- dd->ipath_errormask &= ~dd->ipath_maskederrs;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- dd->ipath_errormask);
- s_iserr = ipath_decode_err(dd, msg, sizeof msg,
- dd->ipath_maskederrs);
-
- if (dd->ipath_maskederrs &
- ~(INFINIPATH_E_RRCVEGRFULL |
- INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
- ipath_dev_err(dd, "Temporarily disabling "
- "error(s) %llx reporting; too frequent (%s)\n",
- (unsigned long long) dd->ipath_maskederrs,
- msg);
- else {
- /*
- * rcvegrfull and rcvhdrqfull are "normal",
- * for some types of processes (mostly benchmarks)
- * that send huge numbers of messages, while not
- * processing them. So only complain about
- * these at debug level.
- */
- if (s_iserr)
- ipath_dbg("Temporarily disabling reporting "
- "too frequent queue full errors (%s)\n",
- msg);
- else
- ipath_cdbg(ERRPKT,
- "Temporarily disabling reporting too"
- " frequent packet errors (%s)\n",
- msg);
- }
-
- /*
-		 * Re-enable the masked errors after around 3 minutes, in
- * ipath_get_faststats(). If we have a series of fast
- * repeating but different errors, the interval will keep
- * stretching out, but that's OK, as that's pretty
- * catastrophic.
- */
- dd->ipath_unmasktime = jiffies + HZ * 180;
- }
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
- if (ignore_this_time)
- errs &= ~ignore_this_time;
- if (errs & ~dd->ipath_lasterror) {
- errs &= ~dd->ipath_lasterror;
- /* never suppress duplicate hwerrors or ibstatuschange */
- dd->ipath_lasterror |= errs &
- ~(INFINIPATH_E_HARDWARE |
- INFINIPATH_E_IBSTATUSCHANGED);
- }
-
- if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
- dd->ipath_spectriggerhit++;
- ipath_dbg("%lu special trigger hits\n",
- dd->ipath_spectriggerhit);
- }
-
- /* likely due to cancel; so suppress message unless verbose */
- if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
- time_after(dd->ipath_lastcancel, jiffies)) {
- /* armlaunch takes precedence; it often causes both. */
- ipath_cdbg(VERBOSE,
- "Suppressed %s error (%llx) after sendbuf cancel\n",
- (errs & INFINIPATH_E_SPIOARMLAUNCH) ?
- "armlaunch" : "sendpktlen", (unsigned long long)errs);
- errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
- }
-
- if (!errs)
- return 0;
-
- if (!noprint) {
- ipath_err_t mask;
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = INFINIPATH_E_IBSTATUSCHANGED |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
-
- /* if we're in debug, then don't mask SDMADISABLED msgs */
- if (ipath_debug & __IPATH_DBG)
- mask &= ~INFINIPATH_E_SDMADISABLED;
-
- ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
- } else
- /* so we don't need if (!noprint) at strlcat's below */
- *msg = 0;
-
- if (errs & E_SUM_PKTERRS) {
- ipath_stats.sps_pkterrs++;
- chkerrpkts = 1;
- }
- if (errs & E_SUM_ERRS)
- ipath_stats.sps_errs++;
-
- if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
- ipath_stats.sps_crcerrs++;
- chkerrpkts = 1;
- }
- iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
-
-
- /*
- * We don't want to print these two as they happen, or we can make
- * the situation even worse, because it takes so long to print
- * messages to serial consoles. Kernel ports get printed from
- * fast_stats, no more than every 5 seconds, user ports get printed
- * on close
- */
- if (errs & INFINIPATH_E_RRCVHDRFULL)
- chkerrpkts |= handle_hdrq_full(dd);
- if (errs & INFINIPATH_E_RRCVEGRFULL) {
- struct ipath_portdata *pd = dd->ipath_pd[0];
-
- /*
- * since this is of less importance and not likely to
- * happen without also getting hdrfull, only count
- * occurrences; don't check each port (or even the kernel
- * vs user)
- */
- ipath_stats.sps_etidfull++;
- if (pd->port_head != ipath_get_hdrqtail(pd))
- chkerrpkts |= 1;
- }
-
- /*
- * do this before IBSTATUSCHANGED, in case both bits set in a single
-	 * interrupt; we want the STATUSCHANGE to "win", so we update our
-	 * internal copy of the state machine correctly
- */
- if (errs & INFINIPATH_E_RIBLOSTLINK) {
- /*
- * force through block below
- */
- errs |= INFINIPATH_E_IBSTATUSCHANGED;
- ipath_stats.sps_iblink++;
- dd->ipath_flags |= IPATH_LINKDOWN;
- dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
- | IPATH_LINKARMED | IPATH_LINKACTIVE);
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-
- ipath_dbg("Lost link, link now down (%s)\n",
- ipath_ibcstatus_str[ipath_read_kreg64(dd,
- dd->ipath_kregs->kr_ibcstatus) & 0xf]);
- }
- if (errs & INFINIPATH_E_IBSTATUSCHANGED)
- handle_e_ibstatuschanged(dd, errs);
-
- if (errs & INFINIPATH_E_RESET) {
- if (!noprint)
- ipath_dev_err(dd, "Got reset, requires re-init "
- "(unload and reload driver)\n");
- dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
- }
-
- if (!noprint && *msg) {
- if (iserr)
- ipath_dev_err(dd, "%s error\n", msg);
- }
- if (dd->ipath_state_wanted & dd->ipath_flags) {
- ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
- "waking\n", dd->ipath_state_wanted,
- dd->ipath_flags);
- wake_up_interruptible(&ipath_state_wait);
- }
-
- return chkerrpkts;
-}
-
-/*
- * try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- */
-void ipath_clear_freeze(struct ipath_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
-
-	/* also disable interrupts; errormask is sometimes overwritten */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
- ipath_cancel_sends(dd, 1);
-
- /* clear the freeze, and be sure chip saw it */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- ipath_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
- E_SPKT_ERRS_IGNORE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- dd->ipath_errormask);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-}
-
-
-/* this is separate to allow for better optimization of ipath_intr() */
-
-static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
-{
- /*
-	 * These sometimes happen during driver init and unload; we don't
-	 * want to process any interrupts at that point
- */
-
- /* this is just a bandaid, not a fix, if something goes badly
- * wrong */
- if (++*unexpectp > 100) {
- if (++*unexpectp > 105) {
- /*
- * ok, we must be taking somebody else's interrupts,
- * due to a messed up mptable and/or PIRQ table, so
- * unregister the interrupt. We've seen this during
- * linuxbios development work, and it may happen in
- * the future again.
- */
- if (dd->pcidev && dd->ipath_irq) {
- ipath_dev_err(dd, "Now %u unexpected "
- "interrupts, unregistering "
- "interrupt handler\n",
- *unexpectp);
- ipath_dbg("free_irq of irq %d\n",
- dd->ipath_irq);
- dd->ipath_f_free_irq(dd);
- }
- }
- if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
- ipath_dev_err(dd, "%u unexpected interrupts, "
- "disabling interrupts completely\n",
- *unexpectp);
- /*
- * disable all interrupts, something is very wrong
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
- 0ULL);
- }
- } else if (*unexpectp > 1)
- ipath_dbg("Interrupt when not ready, should not happen, "
- "ignoring\n");
-}
-
-static noinline void ipath_bad_regread(struct ipath_devdata *dd)
-{
- static int allbits;
-
- /* separate routine, for better optimization of ipath_intr() */
-
- /*
- * We print the message and disable interrupts, in hope of
- * having a better chance of debugging the problem.
- */
- ipath_dev_err(dd,
- "Read of interrupt status failed (all bits set)\n");
- if (allbits++) {
- /* disable all interrupts, something is very wrong */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
- if (allbits == 2) {
- ipath_dev_err(dd, "Still bad interrupt status, "
- "unregistering interrupt\n");
- dd->ipath_f_free_irq(dd);
- } else if (allbits > 2) {
- if ((allbits % 10000) == 0)
- printk(".");
- } else
- ipath_dev_err(dd, "Disabling interrupts, "
- "multiple errors\n");
- }
-}
-
-static void handle_layer_pioavail(struct ipath_devdata *dd)
-{
- unsigned long flags;
- int ret;
-
- ret = ipath_ib_piobufavail(dd->verbs_dev);
- if (ret > 0)
- goto set;
-
- return;
-set:
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-/*
- * Handle receive interrupts for user ports; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll
- */
-static void handle_urcv(struct ipath_devdata *dd, u64 istat)
-{
- u64 portr;
- int i;
- int rcvdint = 0;
-
- /*
- * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
- * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
- * would both like timely updates of the bits so that
- * we don't pass them by unnecessarily. the rmb()
- * here ensures that we see them promptly -- the
- * corresponding wmb()'s are in ipath_poll_urgent()
- * and ipath_poll_next()...
- */
- rmb();
- portr = ((istat >> dd->ipath_i_rcvavail_shift) &
- dd->ipath_i_rcvavail_mask) |
- ((istat >> dd->ipath_i_rcvurg_shift) &
- dd->ipath_i_rcvurg_mask);
- for (i = 1; i < dd->ipath_cfgports; i++) {
- struct ipath_portdata *pd = dd->ipath_pd[i];
-
- if (portr & (1 << i) && pd && pd->port_cnt) {
- if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
- &pd->port_flag)) {
- clear_bit(i + dd->ipath_r_intravail_shift,
- &dd->ipath_rcvctrl);
- wake_up_interruptible(&pd->port_wait);
- rcvdint = 1;
- } else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
- &pd->port_flag)) {
- pd->port_urgent++;
- wake_up_interruptible(&pd->port_wait);
- }
- }
- }
- if (rcvdint) {
- /* only want to take one interrupt, so turn off the rcv
-	 * interrupt for all the ports for which we set rcv_waiting
- * (but never for kernel port)
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- }
-}
-
-irqreturn_t ipath_intr(int irq, void *data)
-{
- struct ipath_devdata *dd = data;
- u64 istat, chk0rcv = 0;
- ipath_err_t estat = 0;
- irqreturn_t ret;
- static unsigned unexpected = 0;
- u64 kportrbits;
-
- ipath_stats.sps_ints++;
-
- if (dd->ipath_int_counter != (u32) -1)
- dd->ipath_int_counter++;
-
- if (!(dd->ipath_flags & IPATH_PRESENT)) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
- }
-
- /*
- * this needs to be flags&initted, not statusp, so we keep
- * taking interrupts even after link goes down, etc.
- * Also, we *must* clear the interrupt at some point, or we won't
- * take it again, which can be real bad for errors, etc...
- */
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- ipath_bad_intr(dd, &unexpected);
- ret = IRQ_NONE;
- goto bail;
- }
-
- istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
-
- if (unlikely(!istat)) {
- ipath_stats.sps_nullintr++;
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- ipath_bad_regread(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- if (unexpected)
- unexpected = 0;
-
- if (unlikely(istat & ~dd->ipath_i_bitsextant))
- ipath_dev_err(dd,
- "interrupt with unknown interrupts %Lx set\n",
- (unsigned long long)
- istat & ~dd->ipath_i_bitsextant);
- else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
- ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
- (unsigned long long) istat);
-
- if (istat & INFINIPATH_I_ERROR) {
- ipath_stats.sps_errints++;
- estat = ipath_read_kreg64(dd,
- dd->ipath_kregs->kr_errorstatus);
- if (!estat)
- dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
- "but no error bits set!\n",
- (unsigned long long) istat);
- else if (estat == -1LL)
- /*
- * should we try clearing all, or hope next read
- * works?
- */
- ipath_dev_err(dd, "Read of error status failed "
- "(all bits set); ignoring\n");
- else
- chk0rcv |= handle_errors(dd, estat);
- }
-
- if (istat & INFINIPATH_I_GPIO) {
- /*
- * GPIO interrupts fall in two broad classes:
- * GPIO_2 indicates (on some HT4xx boards) that a packet
- * has arrived for Port 0. Checking for this
- * is controlled by flag IPATH_GPIO_INTR.
- * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
- * errors that we need to count. Checking for this
- * is controlled by flag IPATH_GPIO_ERRINTRS.
- */
- u32 gpiostatus;
- u32 to_clear = 0;
-
- gpiostatus = ipath_read_kreg32(
- dd, dd->ipath_kregs->kr_gpio_status);
- /* First the error-counter case. */
- if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
- (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
- /* want to clear the bits we see asserted. */
- to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
-
- /*
- * Count appropriately, clear bits out of our copy,
- * as they have been "handled".
- */
- if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
- ipath_dbg("FlowCtl on UnsupVL\n");
- dd->ipath_rxfc_unsupvl_errs++;
- }
- if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
- ipath_dbg("Overrun Threshold exceeded\n");
- dd->ipath_overrun_thresh_errs++;
- }
- if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
- ipath_dbg("Local Link Integrity error\n");
- dd->ipath_lli_errs++;
- }
- gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
- }
- /* Now the Port0 Receive case */
- if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
- (dd->ipath_flags & IPATH_GPIO_INTR)) {
- /*
- * GPIO status bit 2 is set, and we expected it.
-			 * clear it and note it in chk0rcv.
-			 * This probably only happens if a Port0 pkt
-			 * arrives at _just_ the wrong time, and we
-			 * handle that by setting chk0rcv;
- */
- to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
- gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
- chk0rcv = 1;
- }
- if (gpiostatus) {
- /*
- * Some unexpected bits remain. If they could have
- * caused the interrupt, complain and clear.
- * To avoid repetition of this condition, also clear
- * the mask. It is almost certainly due to error.
- */
- const u32 mask = (u32) dd->ipath_gpio_mask;
-
- if (mask & gpiostatus) {
- ipath_dbg("Unexpected GPIO IRQ bits %x\n",
- gpiostatus & mask);
- to_clear |= (gpiostatus & mask);
- dd->ipath_gpio_mask &= ~(gpiostatus & mask);
- ipath_write_kreg(dd,
- dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- }
- }
- if (to_clear) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
- (u64) to_clear);
- }
- }
-
- /*
- * Clear the interrupt bits we found set, unless they are receive
- * related, in which case we already cleared them above, and don't
- * want to clear them again, because we might lose an interrupt.
-	 * Clear it early, so we know the chip will have seen this by
- * the time we process the queue, and will re-interrupt if necessary.
- * The processor itself won't take the interrupt again until we return.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway, and user's
- * waiting for receive are at the bottom.
- */
- kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
- (1ULL << dd->ipath_i_rcvurg_shift);
- if (chk0rcv || (istat & kportrbits)) {
- istat &= ~kportrbits;
- ipath_kreceive(dd->ipath_pd[0]);
- }
-
- if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
- (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
- handle_urcv(dd, istat);
-
- if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
- handle_sdma_intr(dd, istat);
-
- if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- /* always process; sdma verbs uses PIO for acks and VL15 */
- handle_layer_pioavail(dd);
- }
-
- ret = IRQ_HANDLED;
-
-bail:
- return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_kernel.h b/drivers/staging/rdma/ipath/ipath_kernel.h
deleted file mode 100644
index 66c934a5f839..000000000000
--- a/drivers/staging/rdma/ipath/ipath_kernel.h
+++ /dev/null
@@ -1,1374 +0,0 @@
-#ifndef _IPATH_KERNEL_H
-#define _IPATH_KERNEL_H
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for infinipath kernel code
- * ipath_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <asm/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_common.h"
-#include "ipath_debug.h"
-#include "ipath_registers.h"
-
-/* only s/w major version of InfiniPath we can handle */
-#define IPATH_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define IPATH_CHIP_VERS_MIN 0U
-
-/* temporary, maybe always */
-extern struct infinipath_stats ipath_stats;
-
-#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
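-/*
- * Rough, hypothetical arithmetic for the threshold above: three 64-dword
- * SMA packets per second in each direction is 3 * 2 * 64 * 5 = 1920 dwords
- * per 5-second interval, so SMA-only chatter stays just below "active".
- */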
-/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define IPATH_EEP_LOG_CNT (4)
-struct ipath_eep_log_mask {
- u64 errs_to_log;
- u64 hwerrs_to_log;
-};
-
-struct ipath_portdata {
- void **port_rcvegrbuf;
- dma_addr_t *port_rcvegrbuf_phys;
- /* rcvhdrq base, needs mmap before useful */
- void *port_rcvhdrq;
- /* kernel virtual address where hdrqtail is updated */
- void *port_rcvhdrtail_kvaddr;
- /*
- * temp buffer for expected send setup, allocated at open, instead
- * of each setup call
- */
- void *port_tid_pg_list;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t port_wait;
- /*
- * rcvegr bufs base, physical, must fit
-	 * in 44 bits so that 32 bit programs using mmap64 work
- */
- dma_addr_t port_rcvegr_phys;
- /* mmap of hdrq, must fit in 44 bits */
- dma_addr_t port_rcvhdrq_phys;
- dma_addr_t port_rcvhdrqtailaddr_phys;
- /*
- * number of opens (including slave subports) on this instance
- * (ignoring forks, dup, etc. for now)
- */
- int port_cnt;
- /*
- * how much space to leave at start of eager TID entries for
- * protocol use, on each TID
- */
-	/* this port's number; stored instead of calculating it */
- unsigned port_port;
- /* non-zero if port is being shared. */
- u16 port_subport_cnt;
- /* non-zero if port is being shared. */
- u16 port_subport_id;
- /* number of pio bufs for this port (all procs, if shared) */
- u32 port_piocnt;
- /* first pio buffer for this port */
- u32 port_pio_base;
- /* chip offset of PIO buffers for this port */
- u32 port_piobufs;
- /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
- u32 port_rcvegrbuf_chunks;
- /* how many egrbufs per chunk */
- u32 port_rcvegrbufs_perchunk;
- /* order for port_rcvegrbuf_pages */
- size_t port_rcvegrbuf_size;
- /* rcvhdrq size (for freeing) */
- size_t port_rcvhdrq_size;
- /* next expected TID to check when looking for free */
- u32 port_tidcursor;
-	/* IPATH_PORT_* flag bits for this port */
- unsigned long port_flag;
- /* what happened */
- unsigned long int_flag;
- /* WAIT_RCV that timed out, no interrupt */
- u32 port_rcvwait_to;
- /* WAIT_PIO that timed out, no interrupt */
- u32 port_piowait_to;
- /* WAIT_RCV already happened, no wait */
- u32 port_rcvnowait;
- /* WAIT_PIO already happened, no wait */
- u32 port_pionowait;
- /* total number of rcvhdrqfull errors */
- u32 port_hdrqfull;
- /*
- * Used to suppress multiple instances of same
- * port staying stuck at same point.
- */
- u32 port_lastrcvhdrqtail;
- /* saved total number of rcvhdrqfull errors for poll edge trigger */
- u32 port_hdrqfull_poll;
- /* total number of polled urgent packets */
- u32 port_urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 port_urgent_poll;
- /* pid of process using this port */
- struct pid *port_pid;
- struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
- /* same size as task_struct .comm[] */
- char port_comm[TASK_COMM_LEN];
- /* pkeys set by this use of this port */
- u16 port_pkeys[4];
- /* so file ops can get at unit */
- struct ipath_devdata *port_dd;
- /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
- void *subport_uregbase;
- /* An array of pages for the eager receive buffers * N */
- void *subport_rcvegrbuf;
- /* An array of pages for the eager header queue entries * N */
- void *subport_rcvhdr_base;
- /* The version of the library which opened this port */
- u32 userversion;
- /* Bitmask of active slaves */
- u32 active_slaves;
- /* Type of packets or conditions we want to poll for */
- u16 poll_type;
- /* port rcvhdrq head offset */
- u32 port_head;
- /* receive packet sequence counter */
- u32 port_seq_cnt;
-};
-
-struct sk_buff;
-struct ipath_sge_state;
-struct ipath_verbs_txreq;
-
-/*
- * control information for layered drivers
- */
-struct _ipath_layer {
- void *l_arg;
-};
-
-struct ipath_skbinfo {
- struct sk_buff *skb;
- dma_addr_t phys;
-};
-
-struct ipath_sdma_txreq {
- int flags;
- int sg_count;
- union {
- struct scatterlist *sg;
- void *map_addr;
- };
- void (*callback)(void *, int);
- void *callback_cookie;
- int callback_status;
- u16 start_idx; /* sdma private */
- u16 next_descq_idx; /* sdma private */
- struct list_head list; /* sdma private */
-};
-
-struct ipath_sdma_desc {
- __le64 qw[2];
-};
-
-#define IPATH_SDMA_TXREQ_F_USELARGEBUF 0x1
-#define IPATH_SDMA_TXREQ_F_HEADTOHOST 0x2
-#define IPATH_SDMA_TXREQ_F_INTREQ 0x4
-#define IPATH_SDMA_TXREQ_F_FREEBUF 0x8
-#define IPATH_SDMA_TXREQ_F_FREEDESC 0x10
-#define IPATH_SDMA_TXREQ_F_VL15 0x20
-
-#define IPATH_SDMA_TXREQ_S_OK 0
-#define IPATH_SDMA_TXREQ_S_SENDERROR 1
-#define IPATH_SDMA_TXREQ_S_ABORTED 2
-#define IPATH_SDMA_TXREQ_S_SHUTDOWN 3
-
-#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG (1ull << 63)
-#define IPATH_SDMA_STATUS_ABORT_IN_PROG (1ull << 62)
-#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE (1ull << 61)
-#define IPATH_SDMA_STATUS_SCB_EMPTY (1ull << 30)
-
-/* max dwords in small buffer packet */
-#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
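-/* e.g. (hypothetical size): a 2048-byte 2K PIO buffer gives 512 dwords */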
-
-/*
- * Possible IB config parameters for ipath_f_get/set_ib_cfg()
- */
-#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
-#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
-#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
-#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
-#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
-#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
-#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
-#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
-#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
-#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
-#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
-
-
-struct ipath_devdata {
- struct list_head ipath_list;
-
- struct ipath_kregs const *ipath_kregs;
- struct ipath_cregs const *ipath_cregs;
-
- /* mem-mapped pointer to base of chip regs */
- u64 __iomem *ipath_kregbase;
- /* end of mem-mapped chip space; range checking */
- u64 __iomem *ipath_kregend;
- /* physical address of chip for io_remap, etc. */
- unsigned long ipath_physaddr;
- /* base of memory alloced for ipath_kregbase, for free */
- u64 *ipath_kregalloc;
- /* ipath_cfgports pointers */
- struct ipath_portdata **ipath_pd;
- /* sk_buffs used by port 0 eager receive queue */
- struct ipath_skbinfo *ipath_port0_skbinfo;
- /* kvirt address of 1st 2k pio buffer */
- void __iomem *ipath_pio2kbase;
- /* kvirt address of 1st 4k pio buffer */
- void __iomem *ipath_pio4kbase;
- /*
- * points to area where PIOavail registers will be DMA'ed.
-	 * Has to be on a page of its own, because the page will be
- * mapped into user program space. This copy is *ONLY* ever
- * written by DMA, not by the driver! Need a copy per device
- * when we get to multiple devices
- */
- volatile __le64 *ipath_pioavailregs_dma;
- /* physical address where updates occur */
- dma_addr_t ipath_pioavailregs_phys;
- struct _ipath_layer ipath_layer;
- /* setup intr */
- int (*ipath_f_intrsetup)(struct ipath_devdata *);
- /* fallback to alternate interrupt type if possible */
- int (*ipath_f_intr_fallback)(struct ipath_devdata *);
- /* setup on-chip bus config */
- int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
- /* hard reset chip */
- int (*ipath_f_reset)(struct ipath_devdata *);
- int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
- size_t);
- void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
- void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
- size_t);
- void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
- int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
- int (*ipath_f_early_init)(struct ipath_devdata *);
- void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
- void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
- u32, unsigned long);
- void (*ipath_f_tidtemplate)(struct ipath_devdata *);
- void (*ipath_f_cleanup)(struct ipath_devdata *);
- void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
- /* fill out chip-specific fields */
- int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
- /* free irq */
- void (*ipath_f_free_irq)(struct ipath_devdata *);
- struct ipath_message_header *(*ipath_f_get_msgheader)
- (struct ipath_devdata *, __le32 *);
- void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
- int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
- int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
-	void (*ipath_f_config_jint)(struct ipath_devdata *, u16, u16);
- void (*ipath_f_read_counters)(struct ipath_devdata *,
- struct infinipath_counters *);
- void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
- /* per chip actions needed for IB Link up/down changes */
- int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
-
- unsigned ipath_lastegr_idx;
- struct ipath_ibdev *verbs_dev;
- struct timer_list verbs_timer;
- /* total dwords sent (summed from counter) */
- u64 ipath_sword;
- /* total dwords rcvd (summed from counter) */
- u64 ipath_rword;
- /* total packets sent (summed from counter) */
- u64 ipath_spkts;
- /* total packets rcvd (summed from counter) */
- u64 ipath_rpkts;
- /* ipath_statusp initially points to this. */
- u64 _ipath_status;
- /* GUID for this interface, in network order */
- __be64 ipath_guid;
- /*
-	 * aggregate of error bits reported since last cleared, for
- * limiting of error reporting
- */
- ipath_err_t ipath_lasterror;
- /*
-	 * aggregate of error bits reported since last cleared, for
- * limiting of hwerror reporting
- */
- ipath_err_t ipath_lasthwerror;
- /* errors masked because they occur too fast */
- ipath_err_t ipath_maskederrs;
- u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
- /* these 5 fields are used to establish deltas for IB Symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
-
- /* time in jiffies at which to re-enable maskederrs */
- unsigned long ipath_unmasktime;
- /* count of egrfull errors, combined for all ports */
- u64 ipath_last_tidfull;
- /* for ipath_qcheck() */
- u64 ipath_lastport0rcv_cnt;
- /* template for writing TIDs */
- u64 ipath_tidtemplate;
- /* value to write to free TIDs */
- u64 ipath_tidinvalid;
- /* IBA6120 rcv interrupt setup */
- u64 ipath_rhdrhead_intr_off;
-
- /* size of memory at ipath_kregbase */
- u32 ipath_kregsize;
- /* number of registers used for pioavail */
- u32 ipath_pioavregs;
- /* IPATH_POLL, etc. */
- u32 ipath_flags;
- /* ipath_flags driver is waiting for */
- u32 ipath_state_wanted;
- /* last buffer for user use, first buf for kernel use is this
- * index. */
- u32 ipath_lastport_piobuf;
- /* is a stats timer active */
- u32 ipath_stats_timer_active;
- /* number of interrupts for this device -- saturates... */
- u32 ipath_int_counter;
- /* dwords sent read from counter */
- u32 ipath_lastsword;
- /* dwords received read from counter */
- u32 ipath_lastrword;
- /* sent packets read from counter */
- u32 ipath_lastspkts;
- /* received packets read from counter */
- u32 ipath_lastrpkts;
- /* pio bufs allocated per port */
- u32 ipath_pbufsport;
- /* if remainder on bufs/port, ports < extrabuf get 1 extra */
- u32 ipath_ports_extrabuf;
- u32 ipath_pioupd_thresh; /* update threshold, some chips */
- /*
- * number of ports configured as max; zero is set to number chip
- * supports, less gives more pio bufs/port, etc.
- */
- u32 ipath_cfgports;
- /* count of port 0 hdrqfull errors */
- u32 ipath_p0_hdrqfull;
- /* port 0 number of receive eager buffers */
- u32 ipath_p0_rcvegrcnt;
-
- /*
- * index of last piobuffer we used. Speeds up searching, by
-	 * starting at this point. Doesn't matter if multiple cpus use and
-	 * update it; the last update is the only write that matters. Whenever it
- * wraps, we update shadow copies. Need a copy per device when we
- * get to multiple devices
- */
- u32 ipath_lastpioindex;
- u32 ipath_lastpioindexl;
- /* max length of freezemsg */
- u32 ipath_freezelen;
- /*
- * consecutive times we wanted a PIO buffer but were unable to
- * get one
- */
- u32 ipath_consec_nopiobuf;
- /*
- * hint that we should update ipath_pioavailshadow before
- * looking for a PIO buffer
- */
- u32 ipath_upd_pio_shadow;
- /* so we can rewrite it after a chip reset */
- u32 ipath_pcibar0;
- /* so we can rewrite it after a chip reset */
- u32 ipath_pcibar1;
- u32 ipath_x1_fix_tries;
- u32 ipath_autoneg_tries;
- u32 serdes_first_init_done;
-
- struct ipath_relock {
- atomic_t ipath_relock_timer_active;
- struct timer_list ipath_relock_timer;
- unsigned int ipath_relock_interval; /* in jiffies */
- } ipath_relock_singleton;
-
- /* interrupt number */
- int ipath_irq;
- /* HT/PCI Vendor ID (here for NodeInfo) */
- u16 ipath_vendorid;
- /* HT/PCI Device ID (here for NodeInfo) */
- u16 ipath_deviceid;
- /* offset in HT config space of slave/primary interface block */
- u8 ipath_ht_slave_off;
- /* for write combining settings */
- int wc_cookie;
- /* ref count for each pkey */
- atomic_t ipath_pkeyrefs[4];
- /* shadow copy of struct page *'s for exp tid pages */
- struct page **ipath_pageshadow;
- /* shadow copy of dma handles for exp tid pages */
- dma_addr_t *ipath_physshadow;
- u64 __iomem *ipath_egrtidbase;
- /* lock to workaround chip bug 9437 and others */
- spinlock_t ipath_kernel_tid_lock;
- spinlock_t ipath_user_tid_lock;
- spinlock_t ipath_sendctrl_lock;
- /* around ipath_pd and (user ports) port_cnt use (intr vs free) */
- spinlock_t ipath_uctxt_lock;
-
- /*
- * IPATH_STATUS_*,
- * this address is mapped readonly into user processes so they can
- * get status cheaply, whenever they want.
- */
- u64 *ipath_statusp;
- /* freeze msg if hw error put chip in freeze */
- char *ipath_freezemsg;
- /* pci access data structure */
- struct pci_dev *pcidev;
- struct cdev *user_cdev;
- struct cdev *diag_cdev;
- struct device *user_dev;
- struct device *diag_dev;
- /* timer used to prevent stats overflow, error throttling, etc. */
- struct timer_list ipath_stats_timer;
- /* timer to verify interrupts work, and fallback if possible */
- struct timer_list ipath_intrchk_timer;
- void *ipath_dummy_hdrq; /* used after port close */
- dma_addr_t ipath_dummy_hdrq_phys;
-
- /* SendDMA related entries */
- spinlock_t ipath_sdma_lock;
- unsigned long ipath_sdma_status;
- unsigned long ipath_sdma_abort_jiffies;
- unsigned long ipath_sdma_abort_intr_timeout;
- unsigned long ipath_sdma_buf_jiffies;
- struct ipath_sdma_desc *ipath_sdma_descq;
- u64 ipath_sdma_descq_added;
- u64 ipath_sdma_descq_removed;
- int ipath_sdma_desc_nreserved;
- u16 ipath_sdma_descq_cnt;
- u16 ipath_sdma_descq_tail;
- u16 ipath_sdma_descq_head;
- u16 ipath_sdma_next_intr;
- u16 ipath_sdma_reset_wait;
- u8 ipath_sdma_generation;
- struct tasklet_struct ipath_sdma_abort_task;
- struct tasklet_struct ipath_sdma_notify_task;
- struct list_head ipath_sdma_activelist;
- struct list_head ipath_sdma_notifylist;
- atomic_t ipath_sdma_vl15_count;
- struct timer_list ipath_sdma_vl15_timer;
-
- dma_addr_t ipath_sdma_descq_phys;
- volatile __le64 *ipath_sdma_head_dma;
- dma_addr_t ipath_sdma_head_phys;
-
- unsigned long ipath_ureg_align; /* user register alignment */
-
- struct delayed_work ipath_autoneg_work;
- wait_queue_head_t ipath_autoneg_wait;
-
- /* HoL blocking / user app forward-progress state */
- unsigned ipath_hol_state;
- unsigned ipath_hol_next;
- struct timer_list ipath_hol_timer;
-
- /*
- * Shadow copies of registers; size indicates read access size.
-	 * Most of them are readonly, but some are write-only registers,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to infinipath.
- *
- * We deliberately make most of these 32 bits, since they have
-	 * restricted range. For any that we read, we want to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * HT transactions.
- */
-
- /* This is the 64 bit group */
-
- /*
- * shadow of pioavail, check to be sure it's large enough at
- * init time.
- */
- unsigned long ipath_pioavailshadow[8];
- /* bitmap of send buffers available for the kernel to use with PIO. */
- unsigned long ipath_pioavailkernel[8];
- /* shadow of kr_gpio_out, for rmw ops */
- u64 ipath_gpio_out;
- /* shadow the gpio mask register */
- u64 ipath_gpio_mask;
- /* shadow the gpio output enable, etc... */
- u64 ipath_extctrl;
- /* kr_revision shadow */
- u64 ipath_revision;
- /*
- * shadow of ibcctrl, for interrupt handling of link changes,
- * etc.
- */
- u64 ipath_ibcctrl;
- /*
- * last ibcstatus, to suppress "duplicate" status change messages,
- * mostly from 2 to 3
- */
- u64 ipath_lastibcstat;
- /* hwerrmask shadow */
- ipath_err_t ipath_hwerrmask;
- ipath_err_t ipath_errormask; /* errormask shadow */
- /* interrupt config reg shadow */
- u64 ipath_intconfig;
- /* kr_sendpiobufbase value */
- u64 ipath_piobufbase;
- /* kr_ibcddrctrl shadow */
- u64 ipath_ibcddrctrl;
-
- /* these are the "32 bit" regs */
-
- /*
- * number of GUIDs in the flash for this interface; may need some
- * rethinking for setting on other ifaces
- */
- u32 ipath_nguid;
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- /* shadow kr_rcvctrl */
- unsigned long ipath_rcvctrl;
- /* shadow kr_sendctrl */
- unsigned long ipath_sendctrl;
- /* to not count armlaunch after cancel */
- unsigned long ipath_lastcancel;
- /* count cases where special trigger was needed (double write) */
- unsigned long ipath_spectriggerhit;
-
- /* value we put in kr_rcvhdrcnt */
- u32 ipath_rcvhdrcnt;
- /* value we put in kr_rcvhdrsize */
- u32 ipath_rcvhdrsize;
- /* value we put in kr_rcvhdrentsize */
- u32 ipath_rcvhdrentsize;
- /* offset of last entry in rcvhdrq */
- u32 ipath_hdrqlast;
- /* kr_portcnt value */
- u32 ipath_portcnt;
- /* kr_pagealign value */
- u32 ipath_palign;
- /* number of "2KB" PIO buffers */
- u32 ipath_piobcnt2k;
- /* size in bytes of "2KB" PIO buffers */
- u32 ipath_piosize2k;
- /* number of "4KB" PIO buffers */
- u32 ipath_piobcnt4k;
- /* size in bytes of "4KB" PIO buffers */
- u32 ipath_piosize4k;
- u32 ipath_pioreserved; /* reserved special-inkernel; */
- /* kr_rcvegrbase value */
- u32 ipath_rcvegrbase;
- /* kr_rcvegrcnt value */
- u32 ipath_rcvegrcnt;
- /* kr_rcvtidbase value */
- u32 ipath_rcvtidbase;
- /* kr_rcvtidcnt value */
- u32 ipath_rcvtidcnt;
- /* kr_sendregbase */
- u32 ipath_sregbase;
- /* kr_userregbase */
- u32 ipath_uregbase;
- /* kr_counterregbase */
- u32 ipath_cregbase;
- /* shadow the control register contents */
- u32 ipath_control;
- /* PCI revision register (HTC rev on FPGA) */
- u32 ipath_pcirev;
-
- /* chip address space used by 4k pio buffers */
- u32 ipath_4kalign;
- /* The MTU programmed for this unit */
- u32 ipath_ibmtu;
- /*
-	 * The max size IB packet, including IB headers, that we can send.
- * Starts same as ipath_piosize, but is affected when ibmtu is
- * changed, or by size of eager buffers
- */
- u32 ipath_ibmaxlen;
- /*
- * ibmaxlen at init time, limited by chip and by receive buffer
- * size. Not changed after init.
- */
- u32 ipath_init_ibmaxlen;
- /* size of each rcvegrbuffer */
- u32 ipath_rcvegrbufsize;
-	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
- u32 ipath_lbus_width;
- /* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
- u32 ipath_lbus_speed;
- /*
-	 * number of sequential ibcstatus changes for polling active/quiet
- * (i.e., link not coming up).
- */
- u32 ipath_ibpollcnt;
- /* low and high portions of MSI capability/vector */
- u32 ipath_msi_lo;
- /* saved after PCIe init for restore after reset */
- u32 ipath_msi_hi;
- /* MSI data (vector) saved for restore */
- u16 ipath_msi_data;
- /* MLID programmed for this instance */
- u16 ipath_mlid;
- /* LID programmed for this instance */
- u16 ipath_lid;
- /* list of pkeys programmed; 0 if not set */
- u16 ipath_pkeys[4];
- /*
- * ASCII serial number, from flash, large enough for original
- * all digit strings, and longer QLogic serial number format
- */
- u8 ipath_serial[16];
- /* human readable board version */
- u8 ipath_boardversion[96];
- u8 ipath_lbus_info[32]; /* human readable localbus info */
- /* chip major rev, from ipath_revision */
- u8 ipath_majrev;
- /* chip minor rev, from ipath_revision */
- u8 ipath_minrev;
- /* board rev, from ipath_revision */
- u8 ipath_boardrev;
- /* saved for restore after reset */
- u8 ipath_pci_cacheline;
- /* LID mask control */
- u8 ipath_lmc;
- /* link width supported */
- u8 ipath_link_width_supported;
- /* link speed supported */
- u8 ipath_link_speed_supported;
- u8 ipath_link_width_enabled;
- u8 ipath_link_speed_enabled;
- u8 ipath_link_width_active;
- u8 ipath_link_speed_active;
- /* Rx Polarity inversion (compensate for ~tx on partner) */
- u8 ipath_rx_pol_inv;
-
- u8 ipath_r_portenable_shift;
- u8 ipath_r_intravail_shift;
- u8 ipath_r_tailupd_shift;
- u8 ipath_r_portcfg_shift;
-
- /* unit # of this chip, if present */
- int ipath_unit;
-
- /* local link integrity counter */
- u32 ipath_lli_counter;
- /* local link integrity errors */
- u32 ipath_lli_errors;
- /*
- * Above counts only cases where _successive_ LocalLinkIntegrity
- * errors were seen in the receive headers of kern-packets.
- * Below are the three (monotonically increasing) counters
- * maintained via GPIO interrupts on iba6120-rev2.
- */
- u32 ipath_rxfc_unsupvl_errs;
- u32 ipath_overrun_thresh_errs;
- u32 ipath_lli_errs;
-
- /*
- * Not all devices managed by a driver instance are the same
- * type, so these fields must be per-device.
- */
- u64 ipath_i_bitsextant;
- ipath_err_t ipath_e_bitsextant;
- ipath_err_t ipath_hwe_bitsextant;
-
- /*
- * Below should be computable from number of ports,
- * since they are never modified.
- */
- u64 ipath_i_rcvavail_mask;
- u64 ipath_i_rcvurg_mask;
- u16 ipath_i_rcvurg_shift;
- u16 ipath_i_rcvavail_shift;
-
- /*
- * Register bits for selecting i2c direction and values, used for
- * I2C serial flash.
- */
- u8 ipath_gpio_sda_num;
- u8 ipath_gpio_scl_num;
- u8 ipath_i2c_chain_type;
- u64 ipath_gpio_sda;
- u64 ipath_gpio_scl;
-
- /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
- spinlock_t ipath_gpio_lock;
-
- /*
- * IB link and linktraining states and masks that vary per chip in
-	 * some way; set at init to avoid recomputing them on each IB status
-	 * change interrupt
- */
- u8 ibcs_ls_shift;
- u8 ibcs_lts_mask;
- u32 ibcs_mask;
- u32 ib_init;
- u32 ib_arm;
- u32 ib_active;
-
- u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
-
- /*
-	 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
- * reg. Changes for IBA7220
- */
- u8 ibcc_lic_mask; /* LinkInitCmd */
- u8 ibcc_lc_shift; /* LinkCmd */
- u8 ibcc_mpl_shift; /* Maxpktlen */
-
- u8 delay_mult;
-
- /* used to override LED behavior */
- u8 ipath_led_override; /* Substituted for normal value, if non-zero */
- u16 ipath_led_override_timeoff; /* delta to next timer event */
- u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
- u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
- atomic_t ipath_led_override_timer_active;
- /* Used to flash LEDs in override mode */
- struct timer_list ipath_led_override_timer;
-
- /* Support (including locks) for EEPROM logging of errors and time */
- /* control access to actual counters, timer */
- spinlock_t ipath_eep_st_lock;
- /* control high-level access to EEPROM */
- struct mutex ipath_eep_lock;
- /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
- uint64_t ipath_traffic_wds;
- /* active time is kept in seconds, but logged in hours */
- atomic_t ipath_active_time;
- /* Below are nominal shadow of EEPROM, new since last EEPROM update */
- uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
- uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
- uint16_t ipath_eep_hrs;
- /*
- * masks for which bits of errs, hwerrs that cause
- * each of the counters to increment.
- */
- struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
-
- /* interrupt mitigation reload register info */
- u16 ipath_jint_idle_ticks; /* idle clock ticks */
- u16 ipath_jint_max_packets; /* max packets across all ports */
-
- /*
- * lock for access to SerDes, and flags to sequence preset
- * versus steady-state. 7220-only at the moment.
- */
- spinlock_t ipath_sdepb_lock;
- u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
-};
-
-/* ipath_hol_state values (stopping/starting user proc, send flushing) */
-#define IPATH_HOL_UP 0
-#define IPATH_HOL_DOWN 1
-/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
-#define IPATH_HOL_DOWNSTOP 0
-#define IPATH_HOL_DOWNCONT 1
-
-/* bit positions for sdma_status */
-#define IPATH_SDMA_ABORTING 0
-#define IPATH_SDMA_DISARMED 1
-#define IPATH_SDMA_DISABLED 2
-#define IPATH_SDMA_LAYERBUF 3
-#define IPATH_SDMA_RUNNING 30
-#define IPATH_SDMA_SHUTDOWN 31
-
-/* bit combinations that correspond to abort states */
-#define IPATH_SDMA_ABORT_NONE 0
-#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
-#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
- (1UL << IPATH_SDMA_DISARMED))
-#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
- (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
- (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
- (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
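-
-/*
- * Illustrative use (hypothetical caller): a check such as
- *	if ((dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK) ==
- *	    IPATH_SDMA_ABORT_DISARMED)
- * distinguishes "aborting and disarmed" from the other abort sub-states
- * encoded by the three low status bits.
- */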
-
-#define IPATH_SDMA_BUF_NONE 0
-#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
-
-/* Private data for file operations */
-struct ipath_filedata {
- struct ipath_portdata *pd;
- unsigned subport;
- unsigned tidcursor;
- struct ipath_user_sdma_queue *pq;
-};
-extern struct list_head ipath_dev_list;
-extern spinlock_t ipath_devs_lock;
-extern struct ipath_devdata *ipath_lookup(int unit);
-
-int ipath_init_chip(struct ipath_devdata *, int);
-int ipath_enable_wc(struct ipath_devdata *dd);
-void ipath_disable_wc(struct ipath_devdata *dd);
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
-void ipath_shutdown_device(struct ipath_devdata *);
-void ipath_clear_freeze(struct ipath_devdata *);
-
-struct file_operations;
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp);
-void ipath_cdev_cleanup(struct cdev **cdevp,
- struct device **devp);
-
-int ipath_diag_add(struct ipath_devdata *);
-void ipath_diag_remove(struct ipath_devdata *);
-
-extern wait_queue_head_t ipath_state_wait;
-
-int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_remove(struct ipath_devdata *dd);
-
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
-
-extern int ipath_diag_inuse;
-
-irqreturn_t ipath_intr(int irq, void *devid);
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
- ipath_err_t err);
-#if __IPATH_INFO || __IPATH_DBG
-extern const char *ipath_ibcstatus_str[];
-#endif
-
-/* clean up any per-chip chip-specific stuff */
-void ipath_chip_cleanup(struct ipath_devdata *);
-/* clean up any chip type-specific stuff */
-void ipath_chip_done(void);
-
-void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
- unsigned cnt);
-void ipath_cancel_sends(struct ipath_devdata *, int);
-
-int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
-
-int ipath_parse_ushort(const char *str, unsigned short *valp);
-
-void ipath_kreceive(struct ipath_portdata *);
-int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
-int ipath_reset_device(int);
-void ipath_get_faststats(unsigned long);
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-int ipath_set_linkstate(struct ipath_devdata *, u8);
-int ipath_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
-void ipath_enable_armlaunch(struct ipath_devdata *);
-void ipath_disable_armlaunch(struct ipath_devdata *);
-void ipath_hol_down(struct ipath_devdata *);
-void ipath_hol_up(struct ipath_devdata *);
-void ipath_hol_event(unsigned long);
-void ipath_toggle_rclkrls(struct ipath_devdata *);
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
-void ipath_set_relock_poll(struct ipath_devdata *, int);
-void ipath_shutdown_relock_poll(struct ipath_devdata *);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
-#define subport_fp(fp) \
- ((struct ipath_filedata *)(fp)->private_data)->subport
-#define tidcursor_fp(fp) \
- ((struct ipath_filedata *)(fp)->private_data)->tidcursor
-#define user_sdma_queue_fp(fp) \
- ((struct ipath_filedata *)(fp)->private_data)->pq
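-
-/*
- * Illustrative use of the accessors above (not from this driver):
- *	struct ipath_portdata *pd = port_fp(fp);
- *	unsigned subport = subport_fp(fp);
- * recovers the per-open state stashed in fp->private_data.
- */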
-
-/*
- * values for ipath_flags
- */
- /* chip can report link latency (IB 1.2) */
-#define IPATH_HAS_LINK_LATENCY 0x1
- /* The chip is up and initted */
-#define IPATH_INITTED 0x2
- /* set if any user code has set kr_rcvhdrsize */
-#define IPATH_RCVHDRSZ_SET 0x4
- /* The chip is present and valid for accesses */
-#define IPATH_PRESENT 0x8
- /* HT link0 is only 8 bits wide, ignore upper byte crc
- * errors, etc. */
-#define IPATH_8BIT_IN_HT0 0x10
- /* HT link1 is only 8 bits wide, ignore upper byte crc
- * errors, etc. */
-#define IPATH_8BIT_IN_HT1 0x20
- /* The link is down */
-#define IPATH_LINKDOWN 0x40
- /* The link level is up (0x11) */
-#define IPATH_LINKINIT 0x80
- /* The link is in the armed (0x21) state */
-#define IPATH_LINKARMED 0x100
- /* The link is in the active (0x31) state */
-#define IPATH_LINKACTIVE 0x200
- /* link current state is unknown */
-#define IPATH_LINKUNK 0x400
- /* Write combining flush needed for PIO */
-#define IPATH_PIO_FLUSH_WC 0x1000
- /* DMA Receive tail pointer */
-#define IPATH_NODMA_RTAIL 0x2000
- /* no IB cable, or no device on IB cable */
-#define IPATH_NOCABLE 0x4000
- /* Supports port zero per packet receive interrupts via
- * GPIO */
-#define IPATH_GPIO_INTR 0x8000
- /* uses the coded 4byte TID, not 8 byte */
-#define IPATH_4BYTE_TID 0x10000
- /* packet/word counters are 32 bit, else those 4 counters
- * are 64bit */
-#define IPATH_32BITCOUNTERS 0x20000
- /* Interrupt register is 64 bits */
-#define IPATH_INTREG_64 0x40000
- /* can miss port0 rx interrupts */
-#define IPATH_DISABLED 0x80000 /* administratively disabled */
- /* Use GPIO interrupts for new counters */
-#define IPATH_GPIO_ERRINTRS 0x100000
-#define IPATH_SWAP_PIOBUFS 0x200000
- /* Supports Send DMA */
-#define IPATH_HAS_SEND_DMA 0x400000
- /* Supports Send Count (not just word count) in PBC */
-#define IPATH_HAS_PBC_CNT 0x800000
- /* Suppress heartbeat, even if turning off loopback */
-#define IPATH_NO_HRTBT 0x1000000
-#define IPATH_HAS_THRESH_UPDATE 0x4000000
-#define IPATH_HAS_MULT_IB_SPEED 0x8000000
-#define IPATH_IB_AUTONEG_INPROG 0x10000000
-#define IPATH_IB_AUTONEG_FAILED 0x20000000
- /* Linkdown-disable intentionally, Do not attempt to bring up */
-#define IPATH_IB_LINK_DISABLED 0x40000000
-#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
-
-/* Bits in GPIO for the added interrupts */
-#define IPATH_GPIO_PORT0_BIT 2
-#define IPATH_GPIO_RXUVL_BIT 3
-#define IPATH_GPIO_OVRUN_BIT 4
-#define IPATH_GPIO_LLI_BIT 5
-#define IPATH_GPIO_ERRINTR_MASK 0x38
-
-/* portdata flag bit offsets */
- /* waiting for a packet to arrive */
-#define IPATH_PORT_WAITING_RCV 2
- /* master has not finished initializing */
-#define IPATH_PORT_MASTER_UNINIT 4
- /* waiting for an urgent packet to arrive */
-#define IPATH_PORT_WAITING_URG 5
-
-/* free up any allocated data at closes */
-void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
- unsigned len, int avail);
-void ipath_init_iba6110_funcs(struct ipath_devdata *);
-void ipath_get_eeprom_info(struct ipath_devdata *);
-int ipath_update_eeprom_log(struct ipath_devdata *dd);
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
-u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *);
-void ipath_force_pio_avail_update(struct ipath_devdata *);
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
-
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
-
-/* send dma routines */
-int setup_sdma(struct ipath_devdata *);
-void teardown_sdma(struct ipath_devdata *);
-void ipath_restart_sdma(struct ipath_devdata *);
-void ipath_sdma_intr(struct ipath_devdata *);
-int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
- u32, struct ipath_verbs_txreq *);
-/* ipath_sdma_lock should be locked before calling this. */
-int ipath_sdma_make_progress(struct ipath_devdata *dd);
-
-/* must be called under ipath_sdma_lock */
-static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
-{
- return dd->ipath_sdma_descq_cnt -
- (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
- 1 - dd->ipath_sdma_desc_nreserved;
-}
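-
-/*
- * Worked example (hypothetical numbers): with ipath_sdma_descq_cnt = 256,
- * 100 descriptors added, 40 removed and 4 reserved, the free count is
- * 256 - (100 - 40) - 1 - 4 = 191; the "- 1" presumably keeps a full ring
- * from looking empty (head == tail).
- */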
-
-static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
-{
- dd->ipath_sdma_desc_nreserved += cnt;
-}
-
-static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
-{
- dd->ipath_sdma_desc_nreserved -= cnt;
-}
-
-/*
- * number of words used for protocol header if not set by ipath_userinit();
- */
-#define IPATH_DFLT_RCVHDRSIZE 9
-
-int ipath_get_user_pages(unsigned long, size_t, struct page **);
-void ipath_release_user_pages(struct page **, size_t);
-void ipath_release_user_pages_on_close(struct page **, size_t);
-int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
-int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
-int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
-int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
-
-/* these are used for the registers that vary with port */
-void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
- unsigned, u64);
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner. It also gives us some error
- * checking. 64 bit register reads should always work, but are inefficient
- * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32 bit reads, so only one form of those routines.
- */
-
-/*
- * At the moment, none of the s-registers are writable, so no
- * ipath_write_sreg().
- */
-
-/**
- * ipath_read_ureg32 - read 32-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @port: port number
- *
- * Return the contents of a register that is virtualized to be per port.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
- ipath_ureg regno, int port)
-{
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
- return 0;
-
- return readl(regno + (u64 __iomem *)
- (dd->ipath_uregbase +
- (char __iomem *)dd->ipath_kregbase +
- dd->ipath_ureg_align * port));
-}
-
-/**
- * ipath_write_ureg - write a 64-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @value: value
- * @port: port
- *
- * Write the contents of a register that is virtualized to be per port.
- */
-static inline void ipath_write_ureg(const struct ipath_devdata *dd,
- ipath_ureg regno, u64 value, int port)
-{
- u64 __iomem *ubase = (u64 __iomem *)
- (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
- dd->ipath_ureg_align * port);
- if (dd->ipath_kregbase)
- writeq(value, &ubase[regno]);
-}
-
-static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
- ipath_kreg regno)
-{
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
- return -1;
- return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
- ipath_kreg regno)
-{
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
- return -1;
-
- return readq(&dd->ipath_kregbase[regno]);
-}
-
-static inline void ipath_write_kreg(const struct ipath_devdata *dd,
- ipath_kreg regno, u64 value)
-{
- if (dd->ipath_kregbase)
- writeq(value, &dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
- ipath_sreg regno)
-{
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
- return 0;
-
- return readq(regno + (u64 __iomem *)
- (dd->ipath_cregbase +
- (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
- ipath_sreg regno)
-{
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
- return 0;
- return readl(regno + (u64 __iomem *)
- (dd->ipath_cregbase +
- (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_write_creg(const struct ipath_devdata *dd,
- ipath_creg regno, u64 value)
-{
- if (dd->ipath_kregbase)
- writeq(value, regno + (u64 __iomem *)
- (dd->ipath_cregbase +
- (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
-{
- *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
-{
- return (u32) le64_to_cpu(*((volatile __le64 *)
- pd->port_rcvhdrtail_kvaddr));
-}
-
-static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
-{
- const struct ipath_devdata *dd = pd->port_dd;
- u32 hdrqtail;
-
- if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
- __le32 *rhf_addr;
- u32 seq;
-
- rhf_addr = (__le32 *) pd->port_rcvhdrq +
- pd->port_head + dd->ipath_rhf_offset;
- seq = ipath_hdrget_seq(rhf_addr);
- hdrqtail = pd->port_head;
- if (seq == pd->port_seq_cnt)
- hdrqtail++;
- } else
- hdrqtail = ipath_get_rcvhdrtail(pd);
-
- return hdrqtail;
-}
-
-static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
-{
- return (dd->ipath_flags & IPATH_INTREG_64) ?
- ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
-}
-
-/*
- * from contents of IBCStatus (or a saved copy), return linkstate
- * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
- * everywhere, anyway (and should be, for almost all purposes).
- */
-static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
- u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
- INFINIPATH_IBCS_LINKSTATE_MASK;
- if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
- state = INFINIPATH_IBCS_L_STATE_ACTIVE;
- return state;
-}
-
-/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
-static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
-{
- return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
- dd->ibcs_lts_mask;
-}
-
-/*
- * from contents of IBCStatus (or a saved copy), return the logical link
- * state: a combination of link state and linktraining state (down, active,
- * init, arm, etc.).
- */
-static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
-{
- u32 ibs;
- ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
- dd->ibcs_lts_mask;
- ibs |= (u32)(ibcs &
- (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
- return ibs;
-}
-
-/*
- * sysfs interface.
- */
-
-struct device_driver;
-
-extern const char ib_ipath_version[];
-
-extern const struct attribute_group *ipath_driver_attr_groups[];
-
-int ipath_device_create_group(struct device *, struct ipath_devdata *);
-void ipath_device_remove_group(struct device *, struct ipath_devdata *);
-int ipath_expose_reset(struct device *);
-
-int ipath_init_ipathfs(void);
-void ipath_exit_ipathfs(void);
-int ipathfs_add_device(struct ipath_devdata *);
-int ipathfs_remove_device(struct ipath_devdata *);
-
-/*
- * dma_addr wrappers - all 0's invalid for hw
- */
-dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
- size_t, int);
-dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
-const char *ipath_get_unit_name(int unit);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-#if defined(CONFIG_X86_64)
-#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
-#else
-#define ipath_flush_wc() wmb()
-#endif
-
-extern unsigned ipath_debug; /* debugging bit mask */
-extern unsigned ipath_linkrecovery;
-extern unsigned ipath_mtu4096;
-extern struct mutex ipath_mutex;
-
-#define IPATH_DRV_NAME "ib_ipath"
-#define IPATH_MAJOR 233
-#define IPATH_USER_MINOR_BASE 0
-#define IPATH_DIAGPKT_MINOR 127
-#define IPATH_DIAG_MINOR_BASE 129
-#define IPATH_NMINORS 255
-
-#define ipath_dev_err(dd,fmt,...) \
- do { \
- const struct ipath_devdata *__dd = (dd); \
- if (__dd->pcidev) \
- dev_err(&__dd->pcidev->dev, "%s: " fmt, \
- ipath_get_unit_name(__dd->ipath_unit), \
- ##__VA_ARGS__); \
- else \
- printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
- ipath_get_unit_name(__dd->ipath_unit), \
- ##__VA_ARGS__); \
- } while (0)
-
-#if _IPATH_DEBUGGING
-
-# define __IPATH_DBG_WHICH(which,fmt,...) \
- do { \
- if (unlikely(ipath_debug & (which))) \
- printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
- __func__,##__VA_ARGS__); \
- } while(0)
-
-# define ipath_dbg(fmt,...) \
- __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
-# define ipath_cdbg(which,fmt,...) \
- __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
-
-#else /* ! _IPATH_DEBUGGING */
-
-# define ipath_dbg(fmt,...)
-# define ipath_cdbg(which,fmt,...)
-
-#endif /* _IPATH_DEBUGGING */
-
-/*
- * this is used for formatting hw error messages...
- */
-struct ipath_hwerror_msgs {
- u64 mask;
- const char *msg;
-};
-
-#define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
-
-/* in ipath_intr.c... */
-void ipath_format_hwerrors(u64 hwerrs,
- const struct ipath_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs,
- char *msg, size_t lmsg);
-
-#endif /* _IPATH_KERNEL_H */
diff --git a/drivers/staging/rdma/ipath/ipath_keys.c b/drivers/staging/rdma/ipath/ipath_keys.c
deleted file mode 100644
index c0e933fec218..000000000000
--- a/drivers/staging/rdma/ipath/ipath_keys.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <asm/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_alloc_lkey - allocate an lkey
- * @rkt: lkey table in which to allocate the lkey
- * @mr: memory region that this lkey protects
- *
- * Returns 1 if successful, otherwise returns 0.
- */
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret;
-
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* Find the next available LKEY */
- r = n = rkt->next;
- for (;;) {
- if (rkt->table[r] == NULL)
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n) {
- spin_unlock_irqrestore(&rkt->lock, flags);
- ipath_dbg("LKEY table full\n");
- ret = 0;
- goto bail;
- }
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
- ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- rkt->table[r] = mr;
- spin_unlock_irqrestore(&rkt->lock, flags);
-
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_free_lkey - free an lkey
- * @rkt: table from which to free the lkey
- * @lkey: lkey id to free
- */
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
-{
- unsigned long flags;
- u32 r;
-
- if (lkey == 0)
- return;
- r = lkey >> (32 - ib_ipath_lkey_table_size);
- spin_lock_irqsave(&rkt->lock, flags);
- rkt->table[r] = NULL;
- spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * ipath_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
- struct ib_sge *sge, int acc)
-{
- struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct ipath_mregion *mr;
- unsigned n, m;
- size_t off;
- int ret;
-
- /*
- * We use LKEY == zero for kernel virtual addresses
- * (see ipath_get_dma_mr and ipath_dma.c).
- */
- if (sge->lkey == 0) {
- /* always a kernel port, no locking needed */
- struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
- if (pd->user) {
- ret = 0;
- goto bail;
- }
- isge->mr = NULL;
- isge->vaddr = (void *) sge->addr;
- isge->length = sge->length;
- isge->sge_length = sge->length;
- ret = 1;
- goto bail;
- }
- mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
- if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
- qp->ibqp.pd != mr->pd)) {
- ret = 0;
- goto bail;
- }
-
- off = sge->addr - mr->user_base;
- if (unlikely(sge->addr < mr->user_base ||
- off + sge->length > mr->length ||
- (mr->access_flags & acc) != acc)) {
- ret = 0;
- goto bail;
- }
-
- off += mr->offset;
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= IPATH_SEGSZ) {
- m++;
- n = 0;
- }
- }
- isge->mr = mr;
- isge->vaddr = mr->map[m]->segs[n].vaddr + off;
- isge->length = mr->map[m]->segs[n].length - off;
- isge->sge_length = sge->length;
- isge->m = m;
- isge->n = n;
-
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_rkey_ok - check the IB virtual address, length, and RKEY
- * @dev: infiniband device
- * @ss: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- */
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ipath_lkey_table *rkt = &dev->lk_table;
- struct ipath_sge *sge = &ss->sge;
- struct ipath_mregion *mr;
- unsigned n, m;
- size_t off;
- int ret;
-
- /*
- * We use RKEY == zero for kernel virtual addresses
- * (see ipath_get_dma_mr and ipath_dma.c).
- */
- if (rkey == 0) {
- /* always a kernel port, no locking needed */
- struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
- if (pd->user) {
- ret = 0;
- goto bail;
- }
- sge->mr = NULL;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- ss->sg_list = NULL;
- ss->num_sge = 1;
- ret = 1;
- goto bail;
- }
-
- mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
- if (unlikely(mr == NULL || mr->lkey != rkey ||
- qp->ibqp.pd != mr->pd)) {
- ret = 0;
- goto bail;
- }
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0)) {
- ret = 0;
- goto bail;
- }
-
- off += mr->offset;
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= IPATH_SEGSZ) {
- m++;
- n = 0;
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
- ss->sg_list = NULL;
- ss->num_sge = 1;
-
- ret = 1;
-
-bail:
- return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mad.c b/drivers/staging/rdma/ipath/ipath_mad.c
deleted file mode 100644
index ad3a926ab3c5..000000000000
--- a/drivers/staging/rdma/ipath/ipath_mad.c
+++ /dev/null
@@ -1,1521 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_pma.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
-#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
-#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
-
-static int reply(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int recv_subn_get_nodedescription(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- if (smp->attr_mod)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
- return reply(smp);
-}
-
-struct nodeinfo {
- u8 base_version;
- u8 class_version;
- u8 node_type;
- u8 num_ports;
- __be64 sys_guid;
- __be64 node_guid;
- __be64 port_guid;
- __be16 partition_cap;
- __be16 device_id;
- __be32 revision;
- u8 local_port_num;
- u8 vendor_id[3];
-} __attribute__ ((packed));
-
-static int recv_subn_get_nodeinfo(struct ib_smp *smp,
- struct ib_device *ibdev, u8 port)
-{
- struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
- struct ipath_devdata *dd = to_idev(ibdev)->dd;
- u32 vendor, majrev, minrev;
-
- /* GUID 0 is illegal */
- if (smp->attr_mod || (dd->ipath_guid == 0))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- nip->base_version = 1;
- nip->class_version = 1;
- nip->node_type = 1; /* channel adapter */
- /*
- * XXX The num_ports value will need a layer function to get
- * the value if we ever have more than one IB port on a chip.
- * We will also need to get the GUID for the port.
- */
- nip->num_ports = ibdev->phys_port_cnt;
- /* This is already in network order */
- nip->sys_guid = to_idev(ibdev)->sys_image_guid;
- nip->node_guid = dd->ipath_guid;
- nip->port_guid = dd->ipath_guid;
- nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
- nip->device_id = cpu_to_be16(dd->ipath_deviceid);
- majrev = dd->ipath_majrev;
- minrev = dd->ipath_minrev;
- nip->revision = cpu_to_be32((majrev << 16) | minrev);
- nip->local_port_num = port;
- vendor = dd->ipath_vendorid;
- nip->vendor_id[0] = IPATH_SRC_OUI_1;
- nip->vendor_id[1] = IPATH_SRC_OUI_2;
- nip->vendor_id[2] = IPATH_SRC_OUI_3;
-
- return reply(smp);
-}
-
-static int recv_subn_get_guidinfo(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- memset(smp->data, 0, sizeof(smp->data));
-
- /*
- * We only support one GUID for now. If this changes, the
- * portinfo.guid_cap field needs to be updated too.
- */
- if (startgx == 0) {
- __be64 g = to_idev(ibdev)->dd->ipath_guid;
- if (g == 0)
- /* GUID 0 is illegal */
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- /* The first is a copy of the read-only HW GUID. */
- *p = g;
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
-{
- (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
-{
- (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct ipath_devdata *dd)
-{
- return (dd->ipath_ibcctrl >>
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
- unsigned v;
-
- v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
- if (v != n) {
- dd->ipath_ibcctrl &=
- ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
- dd->ipath_ibcctrl |=
- (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- }
- return 0;
-}
-
-static int get_phyerrthreshold(struct ipath_devdata *dd)
-{
- return (dd->ipath_ibcctrl >>
- INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
- unsigned v;
-
- v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
- if (v != n) {
- dd->ipath_ibcctrl &=
- ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
- INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
- dd->ipath_ibcctrl |=
- (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- }
- return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
- return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-static int recv_subn_get_portinfo(struct ib_smp *smp,
- struct ib_device *ibdev, u8 port)
-{
- struct ipath_ibdev *dev;
- struct ipath_devdata *dd;
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- u16 lid;
- u8 ibcstat;
- u8 mtu;
- int ret;
-
- if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
- smp->status |= IB_SMP_INVALID_FIELD;
- ret = reply(smp);
- goto bail;
- }
-
- dev = to_idev(ibdev);
- dd = dev->dd;
-
- /* Clear all fields. Only set the non-zero fields. */
- memset(smp->data, 0, sizeof(smp->data));
-
- /* Only return the mkey if the protection field allows it. */
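-/**
- * process_subn - process a received subnet management MAD
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port_num: the port number this packet came in on
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- */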
- if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
- dev->mkeyprot == 0)
- pip->mkey = dev->mkey;
- pip->gid_prefix = dev->gid_prefix;
- lid = dd->ipath_lid;
- pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
- pip->sm_lid = cpu_to_be16(dev->sm_lid);
- pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
- /* pip->diag_code; */
- pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
- pip->local_port_num = port;
- pip->link_width_enabled = dd->ipath_link_width_enabled;
- pip->link_width_supported = dd->ipath_link_width_supported;
- pip->link_width_active = dd->ipath_link_width_active;
- pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
- ibcstat = dd->ipath_lastibcstat;
- /* map LinkState to IB portinfo values. */
- pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
-
- pip->portphysstate_linkdown =
- (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
- (get_linkdowndefaultstate(dd) ? 1 : 2);
- pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
- pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
- dd->ipath_link_speed_enabled;
- switch (dd->ipath_ibmtu) {
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- default: /* oops, something is wrong */
- mtu = IB_MTU_2048;
- break;
- }
- pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
- pip->vlcap_inittype = 0x10; /* VLCap = VL0, InitType = 0 */
- pip->vl_high_limit = dev->vl_high_limit;
- /* pip->vl_arb_high_cap; // only one VL */
- /* pip->vl_arb_low_cap; // only one VL */
- /* InitTypeReply = 0 */
- /* our mtu cap depends on whether 4K MTU enabled or not */
- pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
- /* HCAs ignore VLStallCount and HOQLife */
- /* pip->vlstallcnt_hoqlife; */
- pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
- pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
- /* P_KeyViolations are counted by hardware. */
- pip->pkey_violations =
- cpu_to_be16((ipath_get_cr_errpkey(dd) -
- dev->z_pkey_violations) & 0xFFFF);
- pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
- /* Only the hardware GUID is supported for now */
- pip->guid_cap = 1;
- pip->clientrereg_resv_subnetto = dev->subnet_timeout;
- /* 32.768 usec. response time (guessing) */
- pip->resv_resptimevalue = 3;
- pip->localphyerrors_overrunerrors =
- (get_phyerrthreshold(dd) << 4) |
- get_overrunthreshold(dd);
- /* pip->max_credit_hint; */
- if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
- u32 v;
-
- v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
- pip->link_roundtrip_latency[0] = v >> 16;
- pip->link_roundtrip_latency[1] = v >> 8;
- pip->link_roundtrip_latency[2] = v;
- }
-
- ret = reply(smp);
-
-bail:
- return ret;
-}
-
-/**
- * get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
- /* always a kernel port, no locking needed */
- struct ipath_portdata *pd = dd->ipath_pd[0];
-
- memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
- return 0;
-}
-
-static int recv_subn_get_pkeytable(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- u16 *p = (u16 *) smp->data;
- __be16 *q = (__be16 *) smp->data;
-
- /* 64 blocks of 32 16-bit P_Key entries */
-
- memset(smp->data, 0, sizeof(smp->data));
- if (startpx == 0) {
- struct ipath_ibdev *dev = to_idev(ibdev);
- unsigned i, n = ipath_get_npkeys(dev->dd);
-
- get_pkeys(dev->dd, p);
-
- for (i = 0; i < n; i++)
- q[i] = cpu_to_be16(p[i]);
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static int recv_subn_set_guidinfo(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- /* The only GUID we support is the first read-only entry. */
- return recv_subn_get_guidinfo(smp, ibdev);
-}
-
-/**
- * set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
-{
- if (sleep)
- dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
- else
- dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- return 0;
-}
-
-/**
- * recv_subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int recv_subn_set_portinfo(struct ib_smp *smp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- struct ib_event event;
- struct ipath_ibdev *dev;
- struct ipath_devdata *dd;
- char clientrereg = 0;
- u16 lid, smlid;
- u8 lwe;
- u8 lse;
- u8 state;
- u16 lstate;
- u32 mtu;
- int ret, ore;
-
- if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
- goto err;
-
- dev = to_idev(ibdev);
- dd = dev->dd;
- event.device = ibdev;
- event.element.port_num = port;
-
- dev->mkey = pip->mkey;
- dev->gid_prefix = pip->gid_prefix;
- dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
- lid = be16_to_cpu(pip->lid);
- if (dd->ipath_lid != lid ||
- dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
- /* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
- goto err;
- ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
- event.event = IB_EVENT_LID_CHANGE;
- ib_dispatch_event(&event);
- }
-
- smlid = be16_to_cpu(pip->sm_lid);
- if (smlid != dev->sm_lid) {
- /* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
- goto err;
- dev->sm_lid = smlid;
- event.event = IB_EVENT_SM_CHANGE;
- ib_dispatch_event(&event);
- }
-
- /* Allow 1x or 4x to be set (see 14.2.6.6). */
- lwe = pip->link_width_enabled;
- if (lwe) {
- if (lwe == 0xFF)
- lwe = dd->ipath_link_width_supported;
- else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
- goto err;
- set_link_width_enabled(dd, lwe);
- }
-
- /* Allow 2.5 or 5.0 Gbs. */
- lse = pip->linkspeedactive_enabled & 0xF;
- if (lse) {
- if (lse == 15)
- lse = dd->ipath_link_speed_supported;
- else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
- goto err;
- set_link_speed_enabled(dd, lse);
- }
-
- /* Set link down default state. */
- switch (pip->portphysstate_linkdown & 0xF) {
- case 0: /* NOP */
- break;
- case 1: /* SLEEP */
- if (set_linkdowndefaultstate(dd, 1))
- goto err;
- break;
- case 2: /* POLL */
- if (set_linkdowndefaultstate(dd, 0))
- goto err;
- break;
- default:
- goto err;
- }
-
- dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
- dev->vl_high_limit = pip->vl_high_limit;
-
- switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
- case IB_MTU_256:
- mtu = 256;
- break;
- case IB_MTU_512:
- mtu = 512;
- break;
- case IB_MTU_1024:
- mtu = 1024;
- break;
- case IB_MTU_2048:
- mtu = 2048;
- break;
- case IB_MTU_4096:
- if (!ipath_mtu4096)
- goto err;
- mtu = 4096;
- break;
- default:
- /* XXX We have already partially updated our state! */
- goto err;
- }
- ipath_set_mtu(dd, mtu);
-
- dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
-
- /* We only support VL0 */
- if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
- goto err;
-
- if (pip->mkey_violations == 0)
- dev->mkey_violations = 0;
-
- /*
- * Hardware counter can't be reset so snapshot and subtract
- * later.
- */
- if (pip->pkey_violations == 0)
- dev->z_pkey_violations = ipath_get_cr_errpkey(dd);
-
- if (pip->qkey_violations == 0)
- dev->qkey_violations = 0;
-
- ore = pip->localphyerrors_overrunerrors;
- if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
- goto err;
-
- if (set_overrunthreshold(dd, (ore & 0xF)))
- goto err;
-
- dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
- if (pip->clientrereg_resv_subnetto & 0x80) {
- clientrereg = 1;
- event.event = IB_EVENT_CLIENT_REREGISTER;
- ib_dispatch_event(&event);
- }
-
- /*
- * Do the port state change now that the other link parameters
- * have been set.
- * Changing the port physical state only makes sense if the link
- * is down or is being set to down.
- */
- state = pip->linkspeed_portstate & 0xF;
- lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
- if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
- goto err;
-
- /*
- * Only state changes of DOWN, ARM, and ACTIVE are valid
- * and must be in the correct state to take effect (see 7.2.6).
- */
- switch (state) {
- case IB_PORT_NOP:
- if (lstate == 0)
- break;
- /* FALLTHROUGH */
- case IB_PORT_DOWN:
- if (lstate == 0)
- lstate = IPATH_IB_LINKDOWN_ONLY;
- else if (lstate == 1)
- lstate = IPATH_IB_LINKDOWN_SLEEP;
- else if (lstate == 2)
- lstate = IPATH_IB_LINKDOWN;
- else if (lstate == 3)
- lstate = IPATH_IB_LINKDOWN_DISABLE;
- else
- goto err;
- ipath_set_linkstate(dd, lstate);
- if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- goto done;
- }
- ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
- IPATH_LINKACTIVE, 1000);
- break;
- case IB_PORT_ARMED:
- ipath_set_linkstate(dd, IPATH_IB_LINKARM);
- break;
- case IB_PORT_ACTIVE:
- ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
- break;
- default:
- /* XXX We have already partially updated our state! */
- goto err;
- }
-
- ret = recv_subn_get_portinfo(smp, ibdev, port);
-
- if (clientrereg)
- pip->clientrereg_resv_subnetto |= 0x80;
-
- goto done;
-
-err:
- smp->status |= IB_SMP_INVALID_FIELD;
- ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-done:
- return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (dd->ipath_pkeys[i] != key)
- continue;
- if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
- dd->ipath_pkeys[i] = 0;
- ret = 1;
- goto bail;
- }
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
- int i;
- u16 lkey = key & 0x7FFF;
- int any = 0;
- int ret;
-
- if (lkey == 0x7FFF) {
- ret = 0;
- goto bail;
- }
-
- /* Look for an empty slot or a matching PKEY. */
- for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (!dd->ipath_pkeys[i]) {
- any++;
- continue;
- }
- /* If it matches exactly, try to increment the ref count */
- if (dd->ipath_pkeys[i] == key) {
- if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
- ret = 0;
- goto bail;
- }
- /* Lost the race. Look for an empty slot below. */
- atomic_dec(&dd->ipath_pkeyrefs[i]);
- any++;
- }
- /*
- * It makes no sense to have both the limited and unlimited
- * PKEY set at the same time since the unlimited one will
- * disable the limited one.
- */
- if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
- }
- for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (!dd->ipath_pkeys[i] &&
- atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
- /* for ipathstats, etc. */
- ipath_stats.sps_pkeys[i] = lkey;
- dd->ipath_pkeys[i] = key;
- ret = 1;
- goto bail;
- }
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
-/**
- * set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
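- * @port: the IB port number (used when dispatching the PKEY change event)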
- */
-static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
-{
- struct ipath_portdata *pd;
- int i;
- int changed = 0;
-
- /* always a kernel port, no locking needed */
- pd = dd->ipath_pd[0];
-
- for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
- u16 key = pkeys[i];
- u16 okey = pd->port_pkeys[i];
-
- if (key == okey)
- continue;
- /*
- * The value of this PKEY table entry is changing.
- * Remove the old entry in the hardware's array of PKEYs.
- */
- if (okey & 0x7FFF)
- changed |= rm_pkey(dd, okey);
- if (key & 0x7FFF) {
- int ret = add_pkey(dd, key);
-
- if (ret < 0)
- key = 0;
- else
- changed |= ret;
- }
- pd->port_pkeys[i] = key;
- }
- if (changed) {
- u64 pkey;
- struct ib_event event;
-
- pkey = (u64) dd->ipath_pkeys[0] |
- ((u64) dd->ipath_pkeys[1] << 16) |
- ((u64) dd->ipath_pkeys[2] << 32) |
- ((u64) dd->ipath_pkeys[3] << 48);
- ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
- (unsigned long long) pkey);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
- pkey);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev->ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
- }
- return 0;
-}
-
-static int recv_subn_set_pkeytable(struct ib_smp *smp,
- struct ib_device *ibdev, u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- __be16 *p = (__be16 *) smp->data;
- u16 *q = (u16 *) smp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- unsigned i, n = ipath_get_npkeys(dev->dd);
-
- for (i = 0; i < n; i++)
- q[i] = be16_to_cpu(p[i]);
-
- if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return recv_subn_get_pkeytable(smp, ibdev);
-}
-
-static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
-{
- struct ib_class_port_info *p =
- (struct ib_class_port_info *)pmp->data;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- if (pmp->mad_hdr.attr_mod != 0)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- /* Indicate AllPortSelect is valid (only one port anyway) */
- p->capability_mask = cpu_to_be16(1 << 8);
- p->base_version = 1;
- p->class_version = 1;
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824
- * sec.
- */
- p->resp_time_value = 18;
-
- return reply((struct ib_smp *) pmp);
-}
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
- * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters which only count the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
- COUNTER_MASK(1, 1) | \
- COUNTER_MASK(1, 2) | \
- COUNTER_MASK(1, 3) | \
- COUNTER_MASK(1, 4))
-
-static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_cregs const *crp = dev->dd->ipath_cregs;
- unsigned long flags;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 ||
- (port_select != port && port_select != 0xFF))
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- /*
- * Ticks are 10x the link transfer period which for 2.5Gbs is 4
- * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
- * intervals are counted in ticks. Since we use Linux timers, that
- * count in jiffies, we can't sample for less than 1000 ticks if HZ
- * == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for
- * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
- * have hardware support for delaying packets.
- */
- if (crp->cr_psstat)
- p->tick = dev->dd->ipath_link_speed_active - 1;
- else
- p->tick = 250; /* 1 usec. */
- p->counter_width = 4; /* 32 bit counters */
- p->counter_mask0_9 = COUNTER_MASK0_9;
- spin_lock_irqsave(&dev->pending_lock, flags);
- if (crp->cr_psstat)
- p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
- else
- p->sample_status = dev->pma_sample_status;
- p->sample_start = cpu_to_be32(dev->pma_sample_start);
- p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
- p->tag = cpu_to_be16(dev->pma_tag);
- p->counter_select[0] = dev->pma_counter_select[0];
- p->counter_select[1] = dev->pma_counter_select[1];
- p->counter_select[2] = dev->pma_counter_select[2];
- p->counter_select[3] = dev->pma_counter_select[3];
- p->counter_select[4] = dev->pma_counter_select[4];
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_cregs const *crp = dev->dd->ipath_cregs;
- unsigned long flags;
- u8 status;
- int ret;
-
- if (pmp->mad_hdr.attr_mod != 0 ||
- (p->port_select != port && p->port_select != 0xFF)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- if (crp->cr_psstat)
- status = ipath_read_creg32(dev->dd, crp->cr_psstat);
- else
- status = dev->pma_sample_status;
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- dev->pma_sample_start = be32_to_cpu(p->sample_start);
- dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
- dev->pma_tag = be16_to_cpu(p->tag);
- dev->pma_counter_select[0] = p->counter_select[0];
- dev->pma_counter_select[1] = p->counter_select[1];
- dev->pma_counter_select[2] = p->counter_select[2];
- dev->pma_counter_select[3] = p->counter_select[3];
- dev->pma_counter_select[4] = p->counter_select[4];
- if (crp->cr_psstat) {
- ipath_write_creg(dev->dd, crp->cr_psinterval,
- dev->pma_sample_interval);
- ipath_write_creg(dev->dd, crp->cr_psstart,
- dev->pma_sample_start);
- } else
- dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
- }
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
- return ret;
-}
-
-static u64 get_counter(struct ipath_ibdev *dev,
- struct ipath_cregs const *crp,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = (crp->cr_psxmitdatacount) ?
- ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
- dev->ipath_sword;
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = (crp->cr_psrcvdatacount) ?
- ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
- dev->ipath_rword;
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = (crp->cr_psxmitpktscount) ?
- ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
- dev->ipath_spkts;
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = (crp->cr_psrcvpktscount) ?
- ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
- dev->ipath_rpkts;
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = (crp->cr_psxmitwaitcount) ?
- ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
- dev->ipath_xmit_wait;
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
- struct ib_device *ibdev)
-{
- struct ib_pma_portsamplesresult *p =
- (struct ib_pma_portsamplesresult *)pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_cregs const *crp = dev->dd->ipath_cregs;
- u8 status;
- int i;
-
- memset(pmp->data, 0, sizeof(pmp->data));
- p->tag = cpu_to_be16(dev->pma_tag);
- if (crp->cr_psstat)
- status = ipath_read_creg32(dev->dd, crp->cr_psstat);
- else
- status = dev->pma_sample_status;
- p->sample_status = cpu_to_be16(status);
- for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
- p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
- cpu_to_be32(
- get_counter(dev, crp, dev->pma_counter_select[i]));
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev)
-{
- struct ib_pma_portsamplesresult_ext *p =
- (struct ib_pma_portsamplesresult_ext *)pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_cregs const *crp = dev->dd->ipath_cregs;
- u8 status;
- int i;
-
- memset(pmp->data, 0, sizeof(pmp->data));
- p->tag = cpu_to_be16(dev->pma_tag);
- if (crp->cr_psstat)
- status = ipath_read_creg32(dev->dd, crp->cr_psstat);
- else
- status = dev->pma_sample_status;
- p->sample_status = cpu_to_be16(status);
- /* 64 bits */
- p->extended_width = cpu_to_be32(0x80000000);
- for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
- p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
- cpu_to_be64(
- get_counter(dev, crp, dev->pma_counter_select[i]));
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_verbs_counters cntrs;
- u8 port_select = p->port_select;
-
- ipath_get_counters(dev->dd, &cntrs);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- dev->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= dev->z_link_downed_counter;
- cntrs.port_rcv_errors += dev->rcv_errors;
- cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
- cntrs.port_xmit_data -= dev->z_port_xmit_data;
- cntrs.port_rcv_data -= dev->z_port_rcv_data;
- cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
- cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
- cntrs.local_link_integrity_errors -=
- dev->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- dev->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= dev->z_vl15_dropped;
- cntrs.vl15_dropped += dev->n_vl15_dropped;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 ||
- (port_select != port && port_select != 0xFF))
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16((u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter = (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
- if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
- p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
- if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
- p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
- if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
- p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_packets =
- cpu_to_be32((u32)cntrs.port_xmit_packets);
- if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
- p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_packets =
- cpu_to_be32((u32) cntrs.port_rcv_packets);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters_ext *p =
- (struct ib_pma_portcounters_ext *)pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- u64 swords, rwords, spkts, rpkts, xwait;
- u8 port_select = p->port_select;
-
- ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
- &rpkts, &xwait);
-
- /* Adjust counters for any resets done. */
- swords -= dev->z_port_xmit_data;
- rwords -= dev->z_port_rcv_data;
- spkts -= dev->z_port_xmit_packets;
- rpkts -= dev->z_port_rcv_packets;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 ||
- (port_select != port && port_select != 0xFF))
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- p->port_xmit_data = cpu_to_be64(swords);
- p->port_rcv_data = cpu_to_be64(rwords);
- p->port_xmit_packets = cpu_to_be64(spkts);
- p->port_rcv_packets = cpu_to_be64(rpkts);
- p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
- p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
- p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
- p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_verbs_counters cntrs;
-
- /*
- * Since the HW doesn't support clearing counters, we save the
- * current count and subtract it from future responses.
- */
- ipath_get_counters(dev->dd, &cntrs);
-
- if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
- dev->z_symbol_error_counter = cntrs.symbol_error_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
- dev->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
- dev->z_link_downed_counter = cntrs.link_downed_counter;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
- dev->z_port_rcv_errors =
- cntrs.port_rcv_errors + dev->rcv_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
- dev->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
- dev->z_port_xmit_discards = cntrs.port_xmit_discards;
-
- if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
- dev->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
-
- if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
- dev->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- dev->n_vl15_dropped = 0;
- dev->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
- dev->z_port_xmit_data = cntrs.port_xmit_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
- dev->z_port_rcv_data = cntrs.port_rcv_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
- dev->z_port_xmit_packets = cntrs.port_xmit_packets;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
- dev->z_port_rcv_packets = cntrs.port_rcv_packets;
-
- return recv_pma_get_portcounters(pmp, ibdev, port);
-}
-
-static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct ipath_ibdev *dev = to_idev(ibdev);
- u64 swords, rwords, spkts, rpkts, xwait;
-
- ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
- &rpkts, &xwait);
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
- dev->z_port_xmit_data = swords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
- dev->z_port_rcv_data = rwords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
- dev->z_port_xmit_packets = spkts;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
- dev->z_port_rcv_packets = rpkts;
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
- dev->n_unicast_xmit = 0;
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
- dev->n_unicast_rcv = 0;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
- dev->n_multicast_xmit = 0;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
- dev->n_multicast_rcv = 0;
-
- return recv_pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port_num, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_smp *smp = (struct ib_smp *)out_mad;
- struct ipath_ibdev *dev = to_idev(ibdev);
- int ret;
-
- *out_mad = *in_mad;
- if (smp->class_version != 1) {
- smp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply(smp);
- goto bail;
- }
-
- /* Is the mkey in the process of expiring? */
- if (dev->mkey_lease_timeout &&
- time_after_eq(jiffies, dev->mkey_lease_timeout)) {
- /* Clear timeout and mkey protection field. */
- dev->mkey_lease_timeout = 0;
- dev->mkeyprot = 0;
- }
-
- /*
- * M_Key checking depends on
- * Portinfo:M_Key_protect_bits
- */
- if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
- dev->mkey != smp->mkey &&
- (smp->method == IB_MGMT_METHOD_SET ||
- (smp->method == IB_MGMT_METHOD_GET &&
- dev->mkeyprot >= 2))) {
- if (dev->mkey_violations != 0xFFFF)
- ++dev->mkey_violations;
- if (dev->mkey_lease_timeout ||
- dev->mkey_lease_period == 0) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- dev->mkey_lease_timeout = jiffies +
- dev->mkey_lease_period * HZ;
- /* Future: Generate a trap notice. */
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- goto bail;
- } else if (dev->mkey_lease_timeout)
- dev->mkey_lease_timeout = 0;
-
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_NODE_DESC:
- ret = recv_subn_get_nodedescription(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_NODE_INFO:
- ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
- goto bail;
- case IB_SMP_ATTR_GUID_INFO:
- ret = recv_subn_get_guidinfo(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = recv_subn_get_portinfo(smp, ibdev, port_num);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = recv_subn_get_pkeytable(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (dev->port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- /* FALLTHROUGH */
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_GUID_INFO:
- ret = recv_subn_set_guidinfo(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = recv_subn_set_portinfo(smp, ibdev, port_num);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (dev->port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- /* FALLTHROUGH */
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_REPORT:
- case IB_MGMT_METHOD_REPORT_RESP:
- case IB_MGMT_METHOD_TRAP_REPRESS:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- default:
- smp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply(smp);
- }
-
-bail:
- return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port_num,
- const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
- int ret;
-
- *out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != 1) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- switch (pmp->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_CLASS_PORT_INFO:
- ret = recv_pma_get_classportinfo(pmp);
- goto bail;
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
- port_num);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT:
- ret = recv_pma_get_portsamplesresult(pmp, ibdev);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT_EXT:
- ret = recv_pma_get_portsamplesresult_ext(pmp,
- ibdev);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = recv_pma_get_portcounters(pmp, ibdev,
- port_num);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = recv_pma_get_portcounters_ext(pmp, ibdev,
- port_num);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
- port_num);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = recv_pma_set_portcounters(pmp, ibdev,
- port_num);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = recv_pma_set_portcounters_ext(pmp, ibdev,
- port_num);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_smp *) pmp);
- }
-
-bail:
- return ret;
-}
-
-/**
- * ipath_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port_num: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- int ret;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port_num,
- in_mad, out_mad);
- goto bail;
- case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port_num, in_mad, out_mad);
- goto bail;
- default:
- ret = IB_MAD_RESULT_SUCCESS;
- }
-
-bail:
- return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mmap.c b/drivers/staging/rdma/ipath/ipath_mmap.c
deleted file mode 100644
index e73274229404..000000000000
--- a/drivers/staging/rdma/ipath/ipath_mmap.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct ipath_mmap_info
- */
-void ipath_release_mmap_info(struct kref *ref)
-{
- struct ipath_mmap_info *ip =
- container_of(ref, struct ipath_mmap_info, ref);
- struct ipath_ibdev *dev = to_idev(ip->context->device);
-
- spin_lock_irq(&dev->pending_lock);
- list_del(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- vfree(ip->obj);
- kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void ipath_vma_open(struct vm_area_struct *vma)
-{
- struct ipath_mmap_info *ip = vma->vm_private_data;
-
- kref_get(&ip->ref);
-}
-
-static void ipath_vma_close(struct vm_area_struct *vma)
-{
- struct ipath_mmap_info *ip = vma->vm_private_data;
-
- kref_put(&ip->ref, ipath_release_mmap_info);
-}
-
-static const struct vm_operations_struct ipath_vm_ops = {
- .open = ipath_vma_open,
- .close = ipath_vma_close,
-};
-
-/**
- * ipath_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- struct ipath_ibdev *dev = to_idev(context->device);
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct ipath_mmap_info *ip, *pp;
- int ret = -EINVAL;
-
- /*
- * Search the device's list of objects waiting for a mmap call.
- * Normally, this list is very short since a call to create a
- * CQ, QP, or SRQ is soon followed by a call to mmap().
- */
- spin_lock_irq(&dev->pending_lock);
- list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
- pending_mmaps) {
- /* Only the creator is allowed to mmap the object */
- if (context != ip->context || (__u64) offset != ip->offset)
- continue;
- /* Don't allow a mmap larger than the object. */
- if (size > ip->size)
- break;
-
- list_del_init(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- ret = remap_vmalloc_range(vma, ip->obj, 0);
- if (ret)
- goto done;
- vma->vm_ops = &ipath_vm_ops;
- vma->vm_private_data = ip;
- ipath_vma_open(vma);
- goto done;
- }
- spin_unlock_irq(&dev->pending_lock);
-done:
- return ret;
-}
-
-/*
- * Allocate information for ipath_mmap
- */
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj) {
- struct ipath_mmap_info *ip;
-
- ip = kmalloc(sizeof *ip, GFP_KERNEL);
- if (!ip)
- goto bail;
-
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- INIT_LIST_HEAD(&ip->pending_mmaps);
- ip->size = size;
- ip->context = context;
- ip->obj = obj;
- kref_init(&ip->ref);
-
-bail:
- return ip;
-}
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
- struct ipath_mmap_info *ip,
- u32 size, void *obj) {
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- ip->size = size;
- ip->obj = obj;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
deleted file mode 100644
index b76b0ce66709..000000000000
--- a/drivers/staging/rdma/ipath/ipath_mr.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-
-/* Fast memory region */
-struct ipath_fmr {
- struct ib_fmr ibfmr;
- u8 page_shift;
- struct ipath_mregion mr; /* must be last */
-};
-
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
-/**
- * ipath_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see ipath_dma.c).
- */
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct ipath_mr *mr;
- struct ib_mr *ret;
-
- mr = kzalloc(sizeof *mr, GFP_KERNEL);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- mr->mr.access_flags = acc;
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-static struct ipath_mr *alloc_mr(int count,
- struct ipath_lkey_table *lk_table)
-{
- struct ipath_mr *mr;
- int m, i = 0;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
- mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
- if (!mr)
- goto done;
-
- /* Allocate first level page tables. */
- for (; i < m; i++) {
- mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
- if (!mr->mr.map[i])
- goto bail;
- }
- mr->mr.mapsz = m;
-
- if (!ipath_alloc_lkey(lk_table, &mr->mr))
- goto bail;
- mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
-
- goto done;
-
-bail:
- while (i) {
- i--;
- kfree(mr->mr.map[i]);
- }
- kfree(mr);
- mr = NULL;
-
-done:
- return mr;
-}
-
-/**
- * ipath_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @virt_addr: virtual address to use (from HCA's point of view)
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the InfiniPath driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct ipath_mr *mr;
- struct ib_umem *umem;
- int n, m, entry;
- struct scatterlist *sg;
- struct ib_mr *ret;
-
- if (length == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(umem))
- return (void *) umem;
-
- n = umem->nmap;
- mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
- ib_umem_release(umem);
- goto bail;
- }
-
- mr->mr.pd = pd;
- mr->mr.user_base = start;
- mr->mr.iova = virt_addr;
- mr->mr.length = length;
- mr->mr.offset = ib_umem_offset(umem);
- mr->mr.access_flags = mr_access_flags;
- mr->mr.max_segs = n;
- mr->umem = umem;
-
- m = 0;
- n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- void *vaddr;
-
- vaddr = page_address(sg_page(sg));
- if (!vaddr) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = umem->page_size;
- n++;
- if (n == IPATH_SEGSZ) {
- m++;
- n = 0;
- }
- }
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by ipath_get_dma_mr()
- * or ipath_reg_user_mr().
- */
-int ipath_dereg_mr(struct ib_mr *ibmr)
-{
- struct ipath_mr *mr = to_imr(ibmr);
- int i;
-
- ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
- i = mr->mr.mapsz;
- while (i) {
- i--;
- kfree(mr->mr.map[i]);
- }
-
- if (mr->umem)
- ib_umem_release(mr->umem);
-
- kfree(mr);
- return 0;
-}
-
-/**
- * ipath_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct ipath_fmr *fmr;
- int m, i = 0;
- struct ib_fmr *ret;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
- fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- /* Allocate first level page tables. */
- for (; i < m; i++) {
- fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
- GFP_KERNEL);
- if (!fmr->mr.map[i])
- goto bail;
- }
- fmr->mr.mapsz = m;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
- goto bail;
- fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.pd = pd;
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- fmr->mr.offset = 0;
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
- goto done;
-
-bail:
- while (i)
- kfree(fmr->mr.map[--i]);
- kfree(fmr);
- ret = ERR_PTR(-ENOMEM);
-
-done:
- return ret;
-}
-
-/**
- * ipath_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
- int list_len, u64 iova)
-{
- struct ipath_fmr *fmr = to_ifmr(ibfmr);
- struct ipath_lkey_table *rkt;
- unsigned long flags;
- int m, n, i;
- u32 ps;
- int ret;
-
- if (list_len > fmr->mr.max_segs) {
- ret = -EINVAL;
- goto bail;
- }
- rkt = &to_idev(ibfmr->device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- ps = 1 << fmr->page_shift;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- if (++n == IPATH_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int ipath_unmap_fmr(struct list_head *fmr_list)
-{
- struct ipath_fmr *fmr;
- struct ipath_lkey_table *rkt;
- unsigned long flags;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rkt = &to_idev(fmr->ibfmr.device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * ipath_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct ipath_fmr *fmr = to_ifmr(ibfmr);
- int i;
-
- ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
- i = fmr->mr.mapsz;
- while (i)
- kfree(fmr->mr.map[--i]);
- kfree(fmr);
- return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_qp.c b/drivers/staging/rdma/ipath/ipath_qp.c
deleted file mode 100644
index 280cd2d638e4..000000000000
--- a/drivers/staging/rdma/ipath/ipath_qp.c
+++ /dev/null
@@ -1,1079 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
-#define mk_qpn(qpt, map, off) (((map) - (qpt)->map) * BITS_PER_PAGE + \
- (off))
-#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
- BITS_PER_PAGE, off)
-
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static u32 credit_table[31] = {
- 0, /* 0 */
- 1, /* 1 */
- 2, /* 2 */
- 3, /* 3 */
- 4, /* 4 */
- 6, /* 5 */
- 8, /* 6 */
- 12, /* 7 */
- 16, /* 8 */
- 24, /* 9 */
- 32, /* A */
- 48, /* B */
- 64, /* C */
- 96, /* D */
- 128, /* E */
- 192, /* F */
- 256, /* 10 */
- 384, /* 11 */
- 512, /* 12 */
- 768, /* 13 */
- 1024, /* 14 */
- 1536, /* 15 */
- 2048, /* 16 */
- 3072, /* 17 */
- 4096, /* 18 */
- 6144, /* 19 */
- 8192, /* 1A */
- 12288, /* 1B */
- 16384, /* 1C */
- 24576, /* 1D */
- 32768 /* 1E */
-};
-
-
-static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
-{
- unsigned long page = get_zeroed_page(GFP_KERNEL);
- unsigned long flags;
-
- /*
- * Free the page if someone raced with us installing it.
- */
-
- spin_lock_irqsave(&qpt->lock, flags);
- if (map->page)
- free_page(page);
- else
- map->page = (void *)page;
- spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-
-static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
-{
- u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
- u32 ret = -1;
-
- if (type == IB_QPT_SMI)
- ret = 0;
- else if (type == IB_QPT_GSI)
- ret = 1;
-
- if (ret != -1) {
- map = &qpt->map[0];
- if (unlikely(!map->page)) {
- get_map_page(qpt, map);
- if (unlikely(!map->page)) {
- ret = -ENOMEM;
- goto bail;
- }
- }
- if (!test_and_set_bit(ret, map->page))
- atomic_dec(&map->n_free);
- else
- ret = -EBUSY;
- goto bail;
- }
-
- qpn = qpt->last + 1;
- if (qpn >= QPN_MAX)
- qpn = 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
- max_scan = qpt->nmaps - !offset;
- for (i = 0;;) {
- if (unlikely(!map->page)) {
- get_map_page(qpt, map);
- if (unlikely(!map->page))
- break;
- }
- if (likely(atomic_read(&map->n_free))) {
- do {
- if (!test_and_set_bit(offset, map->page)) {
- atomic_dec(&map->n_free);
- qpt->last = qpn;
- ret = qpn;
- goto bail;
- }
- offset = find_next_offset(map, offset);
- qpn = mk_qpn(qpt, map, offset);
- /*
- * This test differs from alloc_pidmap().
- * If find_next_offset() does find a zero
- * bit, we don't need to check for QPN
- * wrapping around past our starting QPN.
- * We just need to be sure we don't loop
- * forever.
- */
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
- }
- /*
- * In order to keep the number of pages allocated to a
-	 * minimum, we scan all the existing pages before increasing
- * the size of the bitmap table.
- */
- if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
- break;
- map = &qpt->map[qpt->nmaps++];
- offset = 0;
- } else if (map < &qpt->map[qpt->nmaps]) {
- ++map;
- offset = 0;
- } else {
- map = &qpt->map[0];
- offset = 2;
- }
- qpn = mk_qpn(qpt, map, offset);
- }
-
- ret = -ENOMEM;
-
-bail:
- return ret;
-}
-
-static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
- struct qpn_map *map;
-
- map = qpt->map + qpn / BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
- atomic_inc(&map->n_free);
-}
-
-/**
- * ipath_alloc_qpn - allocate a QP number
- * @qpt: the QP table
- * @qp: the QP
- * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
- *
- * Allocate the next available QPN and put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
- enum ib_qp_type type)
-{
- unsigned long flags;
- int ret;
-
- ret = alloc_qpn(qpt, type);
- if (ret < 0)
- goto bail;
- qp->ibqp.qp_num = ret;
-
- /* Add the QP to the hash table. */
- spin_lock_irqsave(&qpt->lock, flags);
-
- ret %= qpt->max;
- qp->next = qpt->table[ret];
- qpt->table[ret] = qp;
- atomic_inc(&qp->refcount);
-
- spin_unlock_irqrestore(&qpt->lock, flags);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_free_qp - remove a QP from the QP table
- * @qpt: the QP table
- * @qp: the QP to remove
- *
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
-{
- struct ipath_qp *q, **qpp;
- unsigned long flags;
-
- spin_lock_irqsave(&qpt->lock, flags);
-
- /* Remove QP from the hash table. */
- qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
- for (; (q = *qpp) != NULL; qpp = &q->next) {
- if (q == qp) {
- *qpp = qp->next;
- qp->next = NULL;
- atomic_dec(&qp->refcount);
- break;
- }
- }
-
- spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-/**
- * ipath_free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
- */
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
-{
- unsigned long flags;
- struct ipath_qp *qp;
- u32 n, qp_inuse = 0;
-
- spin_lock_irqsave(&qpt->lock, flags);
- for (n = 0; n < qpt->max; n++) {
- qp = qpt->table[n];
- qpt->table[n] = NULL;
-
- for (; qp; qp = qp->next)
- qp_inuse++;
- }
- spin_unlock_irqrestore(&qpt->lock, flags);
-
- for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
- if (qpt->map[n].page)
- free_page((unsigned long) qpt->map[n].page);
- return qp_inuse;
-}
-
-/**
- * ipath_lookup_qpn - return the QP with the given QPN
- * @qpt: the QP table
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
- unsigned long flags;
- struct ipath_qp *qp;
-
- spin_lock_irqsave(&qpt->lock, flags);
-
- for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
- if (qp->ibqp.qp_num == qpn) {
- atomic_inc(&qp->refcount);
- break;
- }
- }
-
- spin_unlock_irqrestore(&qpt->lock, flags);
- return qp;
-}
-
-/**
- * ipath_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
-{
- qp->remote_qpn = 0;
- qp->qkey = 0;
- qp->qp_access_flags = 0;
- atomic_set(&qp->s_dma_busy, 0);
- qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
- qp->s_hdrwords = 0;
- qp->s_wqe = NULL;
- qp->s_pkt_delay = 0;
- qp->s_draining = 0;
- qp->s_psn = 0;
- qp->r_psn = 0;
- qp->r_msn = 0;
- if (type == IB_QPT_RC) {
- qp->s_state = IB_OPCODE_RC_SEND_LAST;
- qp->r_state = IB_OPCODE_RC_SEND_LAST;
- } else {
- qp->s_state = IB_OPCODE_UC_SEND_LAST;
- qp->r_state = IB_OPCODE_UC_SEND_LAST;
- }
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->r_nak_state = 0;
- qp->r_aflags = 0;
- qp->r_flags = 0;
- qp->s_rnr_timeout = 0;
- qp->s_head = 0;
- qp->s_tail = 0;
- qp->s_cur = 0;
- qp->s_last = 0;
- qp->s_ssn = 1;
- qp->s_lsn = 0;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
- qp->r_head_ack_queue = 0;
- qp->s_tail_ack_queue = 0;
- qp->s_num_rd_atomic = 0;
- if (qp->r_rq.wq) {
- qp->r_rq.wq->head = 0;
- qp->r_rq.wq->tail = 0;
- }
-}
-
-/**
- * ipath_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
- int ret = 0;
-
- if (qp->state == IB_QPS_ERR)
- goto bail;
-
- qp->state = IB_QPS_ERR;
-
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- if (!list_empty(&qp->piowait))
- list_del_init(&qp->piowait);
- spin_unlock(&dev->pending_lock);
-
- /* Schedule the sending tasklet to drain the send work queue. */
- if (qp->s_last != qp->s_head)
- ipath_schedule_send(qp);
-
- memset(&wc, 0, sizeof(wc));
- wc.qp = &qp->ibqp;
- wc.opcode = IB_WC_RECV;
-
- if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
- wc.wr_id = qp->r_wr_id;
- wc.status = err;
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wc.status = IB_WC_WR_FLUSH_ERR;
-
- if (qp->r_rq.wq) {
- struct ipath_rwq *wq;
- u32 head;
- u32 tail;
-
- spin_lock(&qp->r_rq.lock);
-
- /* sanity check pointers before trusting them */
- wq = qp->r_rq.wq;
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- while (tail != head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
- if (++tail >= qp->r_rq.size)
- tail = 0;
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wq->tail = tail;
-
- spin_unlock(&qp->r_rq.lock);
- } else if (qp->ibqp.event_handler)
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair whose attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for ipathverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct ipath_ibdev *dev = to_idev(ibqp->device);
- struct ipath_qp *qp = to_iqp(ibqp);
- enum ib_qp_state cur_state, new_state;
- int lastwqe = 0;
- int ret;
-
- spin_lock_irq(&qp->s_lock);
-
- cur_state = attr_mask & IB_QP_CUR_STATE ?
- attr->cur_qp_state : qp->state;
- new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_UNSPECIFIED))
- goto inval;
-
- if (attr_mask & IB_QP_AV) {
- if (attr->ah_attr.dlid == 0 ||
- attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
- goto inval;
-
- if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
- (attr->ah_attr.grh.sgid_index > 1))
- goto inval;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
- goto inval;
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- if (attr->min_rnr_timer > 31)
- goto inval;
-
- if (attr_mask & IB_QP_PORT)
- if (attr->port_num == 0 ||
- attr->port_num > ibqp->device->phys_port_cnt)
- goto inval;
-
- /*
- * don't allow invalid Path MTU values or greater than 2048
- * unless we are configured for a 4KB MTU
- */
- if ((attr_mask & IB_QP_PATH_MTU) &&
- (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
- (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
- goto inval;
-
- if (attr_mask & IB_QP_PATH_MIG_STATE)
- if (attr->path_mig_state != IB_MIG_MIGRATED &&
- attr->path_mig_state != IB_MIG_REARM)
- goto inval;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
- goto inval;
-
- switch (new_state) {
- case IB_QPS_RESET:
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- if (!list_empty(&qp->piowait))
- list_del_init(&qp->piowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~IPATH_S_ANY_WAIT;
- spin_unlock_irq(&qp->s_lock);
- /* Stop the sending tasklet */
- tasklet_kill(&qp->s_task);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- spin_lock_irq(&qp->s_lock);
- }
- ipath_reset_qp(qp, ibqp->qp_type);
- break;
-
- case IB_QPS_SQD:
- qp->s_draining = qp->s_last != qp->s_cur;
- qp->state = new_state;
- break;
-
- case IB_QPS_SQE:
- if (qp->ibqp.qp_type == IB_QPT_RC)
- goto inval;
- qp->state = new_state;
- break;
-
- case IB_QPS_ERR:
- lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- break;
-
- default:
- qp->state = new_state;
- break;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- qp->s_pkey_index = attr->pkey_index;
-
- if (attr_mask & IB_QP_DEST_QPN)
- qp->remote_qpn = attr->dest_qp_num;
-
- if (attr_mask & IB_QP_SQ_PSN) {
- qp->s_psn = qp->s_next_psn = attr->sq_psn;
- qp->s_last_psn = qp->s_next_psn - 1;
- }
-
- if (attr_mask & IB_QP_RQ_PSN)
- qp->r_psn = attr->rq_psn;
-
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->qp_access_flags = attr->qp_access_flags;
-
- if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
- qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
- }
-
- if (attr_mask & IB_QP_PATH_MTU)
- qp->path_mtu = attr->path_mtu;
-
- if (attr_mask & IB_QP_RETRY_CNT)
- qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
-
- if (attr_mask & IB_QP_RNR_RETRY) {
- qp->s_rnr_retry = attr->rnr_retry;
- if (qp->s_rnr_retry > 7)
- qp->s_rnr_retry = 7;
- qp->s_rnr_retry_cnt = qp->s_rnr_retry;
- }
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->r_min_rnr_timer = attr->min_rnr_timer;
-
- if (attr_mask & IB_QP_TIMEOUT)
- qp->timeout = attr->timeout;
-
- if (attr_mask & IB_QP_QKEY)
- qp->qkey = attr->qkey;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
- qp->s_max_rd_atomic = attr->max_rd_atomic;
-
- spin_unlock_irq(&qp->s_lock);
-
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- ret = 0;
- goto bail;
-
-inval:
- spin_unlock_irq(&qp->s_lock);
- ret = -EINVAL;
-
-bail:
- return ret;
-}
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr)
-{
- struct ipath_qp *qp = to_iqp(ibqp);
-
- attr->qp_state = qp->state;
- attr->cur_qp_state = attr->qp_state;
- attr->path_mtu = qp->path_mtu;
- attr->path_mig_state = 0;
- attr->qkey = qp->qkey;
- attr->rq_psn = qp->r_psn;
- attr->sq_psn = qp->s_next_psn;
- attr->dest_qp_num = qp->remote_qpn;
- attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
- attr->cap.max_send_sge = qp->s_max_sge;
- attr->cap.max_recv_sge = qp->r_rq.max_sge;
- attr->cap.max_inline_data = 0;
- attr->ah_attr = qp->remote_ah_attr;
- memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
- attr->pkey_index = qp->s_pkey_index;
- attr->alt_pkey_index = 0;
- attr->en_sqd_async_notify = 0;
- attr->sq_draining = qp->s_draining;
- attr->max_rd_atomic = qp->s_max_rd_atomic;
- attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
- attr->min_rnr_timer = qp->r_min_rnr_timer;
- attr->port_num = 1;
- attr->timeout = qp->timeout;
- attr->retry_cnt = qp->s_retry_cnt;
- attr->rnr_retry = qp->s_rnr_retry_cnt;
- attr->alt_port_num = 0;
- attr->alt_timeout = 0;
-
- init_attr->event_handler = qp->ibqp.event_handler;
- init_attr->qp_context = qp->ibqp.qp_context;
- init_attr->send_cq = qp->ibqp.send_cq;
- init_attr->recv_cq = qp->ibqp.recv_cq;
- init_attr->srq = qp->ibqp.srq;
- init_attr->cap = attr->cap;
- if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
- init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- else
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr->qp_type = qp->ibqp.qp_type;
- init_attr->port_num = 1;
- return 0;
-}
-
-/**
- * ipath_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 ipath_compute_aeth(struct ipath_qp *qp)
-{
- u32 aeth = qp->r_msn & IPATH_MSN_MASK;
-
- if (qp->ibqp.srq) {
- /*
- * Shared receive queues don't generate credits.
- * Set the credit field to the invalid value.
- */
- aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
- } else {
- u32 min, max, x;
- u32 credits;
- struct ipath_rwq *wq = qp->r_rq.wq;
- u32 head;
- u32 tail;
-
- /* sanity check pointers before trusting them */
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- /*
- * Compute the number of credits available (RWQEs).
- * XXX Not holding the r_rq.lock here so there is a small
- * chance that the pair of reads are not atomic.
- */
- credits = head - tail;
- if ((int)credits < 0)
- credits += qp->r_rq.size;
- /*
- * Binary search the credit table to find the code to
- * use.
- */
- min = 0;
- max = 31;
- for (;;) {
- x = (min + max) / 2;
- if (credit_table[x] == credits)
- break;
- if (credit_table[x] > credits)
- max = x;
- else if (min == x)
- break;
- else
- min = x;
- }
- aeth |= x << IPATH_AETH_CREDIT_SHIFT;
- }
- return cpu_to_be32(aeth);
-}
-
-/**
- * ipath_create_qp - create a queue pair for a device
- * @ibpd: the protection domain whose device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: unused by InfiniPath
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
-{
- struct ipath_qp *qp;
- int err;
- struct ipath_swqe *swq = NULL;
- struct ipath_ibdev *dev;
- size_t sz;
- size_t sg_list_sz;
- struct ib_qp *ret;
-
- if (init_attr->create_flags) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
- init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- /* Check receive queue parameters if no SRQ is specified. */
- if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
- init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- if (init_attr->cap.max_send_sge +
- init_attr->cap.max_send_wr +
- init_attr->cap.max_recv_sge +
- init_attr->cap.max_recv_wr == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- }
-
- switch (init_attr->qp_type) {
- case IB_QPT_UC:
- case IB_QPT_RC:
- case IB_QPT_UD:
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- sz = sizeof(struct ipath_sge) *
- init_attr->cap.max_send_sge +
- sizeof(struct ipath_swqe);
- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
- if (swq == NULL) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
- sz = sizeof(*qp);
- sg_list_sz = 0;
- if (init_attr->srq) {
- struct ipath_srq *srq = to_isrq(init_attr->srq);
-
- if (srq->rq.max_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (srq->rq.max_sge - 1);
- } else if (init_attr->cap.max_recv_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (init_attr->cap.max_recv_sge - 1);
- qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
- if (!qp) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_swq;
- }
- if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
- init_attr->qp_type == IB_QPT_SMI ||
- init_attr->qp_type == IB_QPT_GSI)) {
- qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
- if (!qp->r_ud_sg_list) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- } else
- qp->r_ud_sg_list = NULL;
- if (init_attr->srq) {
- sz = 0;
- qp->r_rq.size = 0;
- qp->r_rq.max_sge = 0;
- qp->r_rq.wq = NULL;
- init_attr->cap.max_recv_wr = 0;
- init_attr->cap.max_recv_sge = 0;
- } else {
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
- sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
- sizeof(struct ipath_rwqe);
- qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
- qp->r_rq.size * sz);
- if (!qp->r_rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_sg_list;
- }
- }
-
- /*
- * ib_create_qp() will initialize qp->ibqp
- * except for qp->ibqp.qp_num.
- */
- spin_lock_init(&qp->s_lock);
- spin_lock_init(&qp->r_rq.lock);
- atomic_set(&qp->refcount, 0);
- init_waitqueue_head(&qp->wait);
- init_waitqueue_head(&qp->wait_dma);
- tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
- INIT_LIST_HEAD(&qp->piowait);
- INIT_LIST_HEAD(&qp->timerwait);
- qp->state = IB_QPS_RESET;
- qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
- qp->s_max_sge = init_attr->cap.max_send_sge;
- if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
- else
- qp->s_flags = 0;
- dev = to_idev(ibpd->device);
- err = ipath_alloc_qpn(&dev->qp_table, qp,
- init_attr->qp_type);
- if (err) {
- ret = ERR_PTR(err);
- vfree(qp->r_rq.wq);
- goto bail_sg_list;
- }
- qp->ip = NULL;
- qp->s_tx = NULL;
- ipath_reset_qp(qp, init_attr->qp_type);
- break;
-
- default:
- /* Don't support raw QPs */
- ret = ERR_PTR(-ENOSYS);
- goto bail;
- }
-
- init_attr->cap.max_inline_data = 0;
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See ipath_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- if (!qp->r_rq.wq) {
- __u64 offset = 0;
-
- err = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else {
- u32 s = sizeof(struct ipath_rwq) +
- qp->r_rq.size * sz;
-
- qp->ip =
- ipath_create_mmap_info(dev, s,
- ibpd->uobject->context,
- qp->r_rq.wq);
- if (!qp->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- err = ib_copy_to_udata(udata, &(qp->ip->offset),
- sizeof(qp->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- }
- }
-
- spin_lock(&dev->n_qps_lock);
- if (dev->n_qps_allocated == ib_ipath_max_qps) {
- spin_unlock(&dev->n_qps_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_qps_allocated++;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &qp->ibqp;
- goto bail;
-
-bail_ip:
- if (qp->ip)
- kref_put(&qp->ip->ref, ipath_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- ipath_free_qp(&dev->qp_table, qp);
- free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-bail_sg_list:
- kfree(qp->r_ud_sg_list);
-bail_qp:
- kfree(qp);
-bail_swq:
- vfree(swq);
-bail:
- return ret;
-}
-
-/**
- * ipath_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int ipath_destroy_qp(struct ib_qp *ibqp)
-{
- struct ipath_qp *qp = to_iqp(ibqp);
- struct ipath_ibdev *dev = to_idev(ibqp->device);
-
- /* Make sure HW and driver activity is stopped. */
- spin_lock_irq(&qp->s_lock);
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- if (!list_empty(&qp->piowait))
- list_del_init(&qp->piowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~IPATH_S_ANY_WAIT;
- spin_unlock_irq(&qp->s_lock);
- /* Stop the sending tasklet */
- tasklet_kill(&qp->s_task);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- } else
- spin_unlock_irq(&qp->s_lock);
-
- ipath_free_qp(&dev->qp_table, qp);
-
- if (qp->s_tx) {
- atomic_dec(&qp->refcount);
- if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
- kfree(qp->s_tx->txreq.map_addr);
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
- spin_unlock_irq(&dev->pending_lock);
- qp->s_tx = NULL;
- }
-
- wait_event(qp->wait, !atomic_read(&qp->refcount));
-
- /* all user's cleaned up, mark it available */
- free_qpn(&dev->qp_table, qp->ibqp.qp_num);
- spin_lock(&dev->n_qps_lock);
- dev->n_qps_allocated--;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip)
- kref_put(&qp->ip->ref, ipath_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- kfree(qp->r_ud_sg_list);
- vfree(qp->s_wq);
- kfree(qp);
- return 0;
-}
-
-/**
- * ipath_init_qp_table - initialize the QP table for a device
- * @idev: the device whose QP table we're initializing
- * @size: the size of the QP table
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
-{
- int i;
- int ret;
-
- idev->qp_table.last = 1; /* QPN 0 and 1 are special. */
- idev->qp_table.max = size;
- idev->qp_table.nmaps = 1;
- idev->qp_table.table = kcalloc(size, sizeof(*idev->qp_table.table),
- GFP_KERNEL);
- if (idev->qp_table.table == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
-
- for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
- atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
- idev->qp_table.map[i].page = NULL;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_get_credit - flush the send work queue of a QP
- * @qp: the qp whose send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
-{
- u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
-
- /*
- * If the credit is invalid, we can send
- * as many packets as we like. Otherwise, we have to
- * honor the credit field.
- */
- if (credit == IPATH_AETH_CREDIT_INVAL)
- qp->s_lsn = (u32) -1;
- else if (qp->s_lsn != (u32) -1) {
- /* Compute new LSN (i.e., MSN + credit) */
- credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
- if (ipath_cmp24(credit, qp->s_lsn) > 0)
- qp->s_lsn = credit;
- }
-
- /* Restart sending if it was blocked due to lack of credits. */
- if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
- qp->s_cur != qp->s_head &&
- (qp->s_lsn == (u32) -1 ||
- ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
- qp->s_lsn + 1) <= 0))
- ipath_schedule_send(qp);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
deleted file mode 100644
index d4aa53574e57..000000000000
--- a/drivers/staging/rdma/ipath/ipath_rc.c
+++ /dev/null
@@ -1,1969 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
- u32 psn, u32 pmtu)
-{
- u32 len;
-
- len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
- ss->sge = wqe->sg_list[0];
- ss->sg_list = wqe->sg_list + 1;
- ss->num_sge = wqe->wr.num_sge;
- ipath_skip_sge(ss, len);
- return wqe->length - len;
-}
-
-/**
- * ipath_init_restart - initialize the qp->s_sge after a restart
- * @qp: the QP whose SGE we're restarting
- * @wqe: the work queue to initialize the QP's SGE from
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
-{
- struct ipath_ibdev *dev;
-
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
- ib_mtu_enum_to_int(qp->path_mtu));
- dev = to_idev(qp->ibqp.device);
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->timerwait))
- list_add_tail(&qp->timerwait,
- &dev->pending[dev->pending_index]);
- spin_unlock(&dev->pending_lock);
-}
-
-/**
- * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are in the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
- struct ipath_other_headers *ohdr, u32 pmtu)
-{
- struct ipath_ack_entry *e;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
-
- /* Don't send an ACK if we aren't supposed to. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
- goto bail;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
-
- switch (qp->s_ack_state) {
- case OP(RDMA_READ_RESPONSE_LAST):
- case OP(RDMA_READ_RESPONSE_ONLY):
- case OP(ATOMIC_ACKNOWLEDGE):
- /*
- * We can increment the tail pointer now that the last
- * response has been sent instead of only being
- * constructed.
- */
- if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
- qp->s_tail_ack_queue = 0;
- /* FALLTHROUGH */
- case OP(SEND_ONLY):
- case OP(ACKNOWLEDGE):
- /* Check for no next entry in the queue. */
- if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & IPATH_S_ACK_PENDING)
- goto normal;
- qp->s_ack_state = OP(ACKNOWLEDGE);
- goto bail;
- }
-
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST)) {
- /* Copy SGE state in case we need to resend */
- qp->s_ack_rdma_sge = e->rdma_sge;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- len = e->rdma_sge.sge.sge_length;
- if (len > pmtu) {
- len = pmtu;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- } else {
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
- e->sent = 1;
- }
- ohdr->u.aeth = ipath_compute_aeth(qp);
- hwords++;
- qp->s_ack_rdma_psn = e->psn;
- bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
- } else {
- /* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
- len = 0;
- qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
- ohdr->u.at.aeth = ipath_compute_aeth(qp);
- ohdr->u.at.atomic_ack_eth[0] =
- cpu_to_be32(e->atomic_data >> 32);
- ohdr->u.at.atomic_ack_eth[1] =
- cpu_to_be32(e->atomic_data);
- hwords += sizeof(ohdr->u.at) / sizeof(u32);
- bth2 = e->psn;
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- len = qp->s_ack_rdma_sge.sge.sge_length;
- if (len > pmtu)
- len = pmtu;
- else {
- ohdr->u.aeth = ipath_compute_aeth(qp);
- hwords++;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
- qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
- break;
-
- default:
- normal:
- /*
- * Send a regular ACK.
- * Set the s_ack_state so we wait until after sending
- * the ACK before setting s_ack_state to ACKNOWLEDGE
- * (see above).
- */
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~IPATH_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
- if (qp->s_nak_state)
- ohdr->u.aeth =
- cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
- (qp->s_nak_state <<
- IPATH_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = ipath_compute_aeth(qp);
- hwords++;
- len = 0;
- bth0 = OP(ACKNOWLEDGE) << 24;
- bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
- }
- qp->s_hdrwords = hwords;
- qp->s_cur_size = len;
- ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
- return 1;
-
-bail:
- return 0;
-}
-
-/**
- * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_rc_req(struct ipath_qp *qp)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ipath_other_headers *ohdr;
- struct ipath_sge_state *ss;
- struct ipath_swqe *wqe;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
- char newreq;
- unsigned long flags;
- int ret = 0;
-
- ohdr = &qp->s_hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr.u.l.oth;
-
- /*
- * The lock is needed to synchronize between the sending tasklet,
- * the receive interrupt handler, and timeout resends.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Sending responses has higher priority over sending requests. */
- if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
- (qp->s_flags & IPATH_S_ACK_PENDING) ||
- qp->s_ack_state != OP(ACKNOWLEDGE)) &&
- ipath_make_rc_ack(dev, qp, ohdr, pmtu))
- goto done;
-
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
- if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= IPATH_S_WAIT_DMA;
- goto bail;
- }
- wqe = get_swqe_ptr(qp, qp->s_last);
- ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- /* Leave BUSY set until RNR timeout. */
- if (qp->s_rnr_timeout) {
- qp->s_flags |= IPATH_S_WAITING;
- goto bail;
- }
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 1 << 22; /* Set M bit */
-
- /* Send a request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
- switch (qp->s_state) {
- default:
- if (!(ib_ipath_state_ops[qp->state] &
- IPATH_PROCESS_NEXT_SEND_OK))
- goto bail;
- /*
- * Resend an old request or start a new one.
- *
- * We keep track of the current SWQE so that
- * we don't reset the "furthest progress" state
- * if we need to back up.
- */
- newreq = 0;
- if (qp->s_cur == qp->s_tail) {
- /* Check if send work queue is empty. */
- if (qp->s_tail == qp->s_head)
- goto bail;
- /*
- * If a fence is requested, wait for previous
- * RDMA read and atomic operations to finish.
- */
- if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
- qp->s_num_rd_atomic) {
- qp->s_flags |= IPATH_S_FENCE_PENDING;
- goto bail;
- }
- wqe->psn = qp->s_next_psn;
- newreq = 1;
- }
- /*
- * Note that we have to be careful not to modify the
- * original work request since we may need to resend
- * it.
- */
- len = wqe->length;
- ss = &qp->s_sge;
- bth2 = 0;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- /* If no credit, return. */
- if (qp->s_lsn != (u32) -1 &&
- ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
- goto bail;
- }
- wqe->lpsn = wqe->psn;
- if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- bth2 = 1 << 31; /* Request ACK. */
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- if (newreq && qp->s_lsn != (u32) -1)
- qp->s_lsn++;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE_WITH_IMM:
- /* If no credit, return. */
- if (qp->s_lsn != (u32) -1 &&
- ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
- goto bail;
- }
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / sizeof(u32);
- wqe->lpsn = wqe->psn;
- if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- }
- bth2 = 1 << 31; /* Request ACK. */
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_READ:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= IPATH_S_RDMAR_PENDING;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (qp->s_lsn != (u32) -1)
- qp->s_lsn++;
- /*
- * Adjust s_next_psn to count the
- * expected number of responses.
- */
- if (len > pmtu)
- qp->s_next_psn += (len - 1) / pmtu;
- wqe->lpsn = qp->s_next_psn++;
- }
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- ss = NULL;
- len = 0;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= IPATH_S_RDMAR_PENDING;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (qp->s_lsn != (u32) -1)
- qp->s_lsn++;
- wqe->lpsn = wqe->psn;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- qp->s_state = OP(COMPARE_SWAP);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.swap);
- ohdr->u.atomic_eth.compare_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
- } else {
- qp->s_state = OP(FETCH_ADD);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
- ohdr->u.atomic_eth.compare_data = 0;
- }
- ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
- wqe->atomic_wr.remote_addr >> 32);
- ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
- wqe->atomic_wr.remote_addr);
- ohdr->u.atomic_eth.rkey = cpu_to_be32(
- wqe->atomic_wr.rkey);
- hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
- ss = NULL;
- len = 0;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_len = wqe->length;
- if (newreq) {
- qp->s_tail++;
- if (qp->s_tail >= qp->s_size)
- qp->s_tail = 0;
- }
- bth2 |= qp->s_psn & IPATH_PSN_MASK;
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_psn = wqe->lpsn + 1;
- else {
- qp->s_psn++;
- if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- }
- /*
- * Put the QP on the pending list so lost ACKs will cause
- * a retry. More than one request can be pending so the
- * QP may already be on the dev->pending list.
- */
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->timerwait))
- list_add_tail(&qp->timerwait,
- &dev->pending[dev->pending_index]);
- spin_unlock(&dev->pending_lock);
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- /*
- * This case can only happen if a send is restarted.
- * See ipath_restart_rc().
- */
- ipath_init_restart(qp, wqe);
- /* FALLTHROUGH */
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- bth2 = qp->s_psn++ & IPATH_PSN_MASK;
- if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- bth2 |= 1 << 31; /* Request ACK. */
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /*
- * This case can only happen if a RDMA write is restarted.
- * See ipath_restart_rc().
- */
- ipath_init_restart(qp, wqe);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- bth2 = qp->s_psn++ & IPATH_PSN_MASK;
- if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- }
- bth2 |= 1 << 31; /* Request ACK. */
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /*
- * This case can only happen if a RDMA read is restarted.
- * See ipath_restart_rc().
- */
- ipath_init_restart(qp, wqe);
- len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr + len);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- bth2 = qp->s_psn & IPATH_PSN_MASK;
- qp->s_psn = wqe->lpsn + 1;
- ss = NULL;
- len = 0;
- qp->s_cur++;
- if (qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
- }
- if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
- bth2 |= 1 << 31; /* Request ACK. */
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
- ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
- ret = 1;
- goto unlock;
-
-bail:
- qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
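The request-building code above (and the ACK processing below) leans on ipath_cmp24() for circular comparison of 24-bit PSNs. As a minimal illustrative sketch of that idea only, assuming the usual sign-extension trick rather than quoting the driver's own helper:

/*
 * Sketch only (not part of this file): compare two 24-bit PSNs, returning
 * < 0, 0 or > 0 so that wrap-around is handled, in the spirit of the
 * ipath_cmp24() calls above.
 */
static int psn_cmp24_sketch(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8 >> 8;
}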
-/**
- * send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from ipath_rc_rcv() and only uses the receive
- * side QP state.
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-static void send_rc_ack(struct ipath_qp *qp)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ipath_devdata *dd;
- u16 lrh0;
- u32 bth0;
- u32 hwords;
- u32 __iomem *piobuf;
- struct ipath_ib_header hdr;
- struct ipath_other_headers *ohdr;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
- if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
- (qp->s_flags & IPATH_S_ACK_PENDING) ||
- qp->s_ack_state != OP(ACKNOWLEDGE))
- goto queue_ack;
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Don't try to send ACKs if the link isn't ACTIVE */
- dd = dev->dd;
- if (!(dd->ipath_flags & IPATH_LINKACTIVE))
- goto done;
-
- piobuf = ipath_getpiobuf(dd, 0, NULL);
- if (!piobuf) {
- /*
- * We are out of PIO buffers at the moment.
- * Pass responsibility for sending the ACK to the
- * send tasklet so that when a PIO buffer becomes
- * available, the ACK is sent ahead of other outgoing
- * packets.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- goto queue_ack;
- }
-
- /* Construct the header. */
- ohdr = &hdr.u.oth;
- lrh0 = IPATH_LRH_BTH;
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
- hwords = 6;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- hwords += ipath_make_grh(dev, &hdr.u.l.grh,
- &qp->remote_ah_attr.grh,
- hwords, 0);
- ohdr = &hdr.u.l.oth;
- lrh0 = IPATH_LRH_GRH;
- }
-	/* read pkey_index w/o lock (it's atomic) */
- bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
- (OP(ACKNOWLEDGE) << 24) | (1 << 22);
- if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
- (qp->r_nak_state <<
- IPATH_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = ipath_compute_aeth(qp);
- lrh0 |= qp->remote_ah_attr.sl << 4;
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
- qp->remote_ah_attr.src_path_bits);
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
-
- writeq(hwords + 1, piobuf);
-
- if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
- u32 *hdrp = (u32 *) &hdr;
-
- ipath_flush_wc();
- __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
- ipath_flush_wc();
- __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
- } else
- __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
- ipath_flush_wc();
-
- dev->n_unicast_xmit++;
- goto done;
-
-queue_ack:
- if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) {
- dev->n_rc_qacks++;
- qp->s_flags |= IPATH_S_ACK_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
-
- /* Schedule the send tasklet. */
- ipath_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return;
-}
-
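For orientation, the AETH word built in send_rc_ack() above packs the receive-side MSN into the low 24 bits and the NAK state or credit code into the byte above it; do_rc_ack() below then keys off the top three bits to distinguish ACK, RNR NAK and NAK. A hedged sketch of that packing, with made-up names standing in for the driver's IPATH_MSN_MASK / IPATH_AETH_CREDIT_SHIFT constants:

/*
 * Sketch only: assumed AETH layout as used by send_rc_ack() above and
 * do_rc_ack() below -- low 24 bits MSN, high byte syndrome (top three
 * bits: 0 = ACK, 1 = RNR NAK, 3 = NAK).
 */
#define SKETCH_AETH_MSN_MASK	0x00ffffffu
#define SKETCH_AETH_SYN_SHIFT	24

static u32 build_aeth_sketch(u32 msn, u32 syndrome)
{
	return (msn & SKETCH_AETH_MSN_MASK) |
	       (syndrome << SKETCH_AETH_SYN_SHIFT);
}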
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
- u32 n = qp->s_last;
- struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
- u32 opcode;
-
- qp->s_cur = n;
-
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (ipath_cmp24(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
-
- /* Find the work request opcode corresponding to the given PSN. */
- opcode = wqe->wr.opcode;
- for (;;) {
- int diff;
-
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- wqe = get_swqe_ptr(qp, n);
- diff = ipath_cmp24(psn, wqe->psn);
- if (diff < 0)
- break;
- qp->s_cur = n;
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (diff == 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
- opcode = wqe->wr.opcode;
- }
-
- /*
- * Set the state to restart in the middle of a request.
- * Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See ipath_make_rc_req().
- */
- switch (opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
-		 * This case shouldn't happen since there is
-		 * only one PSN per request.
- */
- qp->s_state = OP(SEND_LAST);
- }
-done:
- qp->s_psn = psn;
-}
-
-/**
- * ipath_restart_rc - back up requester to resend the last un-ACKed request
- * @qp: the QP to restart
- * @psn: packet sequence number for the request
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
-{
- struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- struct ipath_ibdev *dev;
-
- if (qp->s_retry == 0) {
- ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- goto bail;
- }
- qp->s_retry--;
-
- /*
- * Remove the QP from the timeout queue.
- * Note: it may already have been removed by ipath_ib_timer().
- */
- dev = to_idev(qp->ibqp.device);
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- if (!list_empty(&qp->piowait))
- list_del_init(&qp->piowait);
- spin_unlock(&dev->pending_lock);
-
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- dev->n_rc_resends++;
- else
- dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
-
- reset_psn(qp, psn);
- ipath_schedule_send(qp);
-
-bail:
- return;
-}
-
-static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
-{
- qp->s_last_psn = psn;
-}
-
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @aeth: the AETH field from the ACK packet
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
- * @val: the value returned with the ACK (the atomic result, if any)
- *
- * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held and interrupts disabled.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
- u64 val)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
- enum ib_wc_status status;
- struct ipath_swqe *wqe;
- int ret = 0;
- u32 ack_psn;
- int diff;
-
- /*
- * Remove the QP from the timeout queue (or RNR timeout queue).
- * If ipath_ib_timer() has already removed it,
- * it's OK since we hold the QP s_lock and ipath_restart_rc()
- * just won't find anything to restart if we ACK everything.
- */
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- spin_unlock(&dev->pending_lock);
-
- /*
- * Note that NAKs implicitly ACK outstanding SEND and RDMA write
- * requests and implicitly NAK RDMA read and atomic requests issued
- * before the NAK'ed request. The MSN won't include the NAK'ed
-	 * request but will include any ACK'ed request(s).
- */
- ack_psn = psn;
- if (aeth >> 29)
- ack_psn--;
- wqe = get_swqe_ptr(qp, qp->s_last);
-
- /*
- * The MSN might be for a later WQE than the PSN indicates so
- * only complete WQEs that the PSN finishes.
- */
- while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
- /*
- * RDMA_READ_RESPONSE_ONLY is a special case since
- * we want to generate completion events for everything
- * before the RDMA read, copy the data, then generate
- * the completion for the read.
- */
- if (wqe->wr.opcode == IB_WR_RDMA_READ &&
- opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
- diff == 0) {
- ret = 1;
- goto bail;
- }
- /*
- * If this request is a RDMA read or atomic, and the ACK is
- * for a later operation, this ACK NAKs the RDMA read or
- * atomic. In other words, only a RDMA_READ_LAST or ONLY
- * can ACK a RDMA read and likewise for atomic ops. Note
- * that the NAK case can only happen if relaxed ordering is
- * used and requests are sent after an RDMA read or atomic
- * is sent but before the response is received.
- */
- if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
- (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
- ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
- /*
- * The last valid PSN seen is the previous
- * request's.
- */
- update_last_psn(qp, wqe->psn - 1);
- /* Retry this request. */
- ipath_restart_rc(qp, wqe->psn);
- /*
- * No need to process the ACK/NAK since we are
- * restarting an earlier request.
- */
- goto bail;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- *(u64 *) wqe->sg_list[0].vaddr = val;
- if (qp->s_num_rd_atomic &&
- (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
- qp->s_num_rd_atomic--;
- /* Restart sending task if fence is complete */
- if (((qp->s_flags & IPATH_S_FENCE_PENDING) &&
- !qp->s_num_rd_atomic) ||
- qp->s_flags & IPATH_S_RDMAR_PENDING)
- ipath_schedule_send(qp);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof wc);
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
- }
- qp->s_retry = qp->s_retry_cnt;
- /*
- * If we are completing a request which is in the process of
- * being resent, we can stop resending it since we know the
- * responder has already seen it.
- */
- if (qp->s_last == qp->s_cur) {
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- qp->s_last = qp->s_cur;
- if (qp->s_last == qp->s_tail)
- break;
- wqe = get_swqe_ptr(qp, qp->s_cur);
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- } else {
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
- qp->s_draining = 0;
- if (qp->s_last == qp->s_tail)
- break;
- wqe = get_swqe_ptr(qp, qp->s_last);
- }
- }
-
- switch (aeth >> 29) {
- case 0: /* ACK */
- dev->n_rc_acks++;
- /* If this is a partial ACK, reset the retransmit timer. */
- if (qp->s_last != qp->s_tail) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->timerwait))
- list_add_tail(&qp->timerwait,
- &dev->pending[dev->pending_index]);
- spin_unlock(&dev->pending_lock);
- /*
- * If we get a partial ACK for a resent operation,
- * we can stop resending the earlier packets and
- * continue with the next packet the receiver wants.
- */
- if (ipath_cmp24(qp->s_psn, psn) <= 0) {
- reset_psn(qp, psn + 1);
- ipath_schedule_send(qp);
- }
- } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
- }
- ipath_get_credit(qp, aeth);
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, psn);
- ret = 1;
- goto bail;
-
- case 1: /* RNR NAK */
- dev->n_rnr_naks++;
- if (qp->s_last == qp->s_tail)
- goto bail;
- if (qp->s_rnr_retry == 0) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
- }
- if (qp->s_rnr_retry_cnt < 7)
- qp->s_rnr_retry--;
-
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
-
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- dev->n_rc_resends++;
- else
- dev->n_rc_resends +=
- (qp->s_psn - psn) & IPATH_PSN_MASK;
-
- reset_psn(qp, psn);
-
- qp->s_rnr_timeout =
- ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
- IPATH_AETH_CREDIT_MASK];
- ipath_insert_rnr_queue(qp);
- ipath_schedule_send(qp);
- goto bail;
-
- case 3: /* NAK */
- if (qp->s_last == qp->s_tail)
- goto bail;
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
- switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
- IPATH_AETH_CREDIT_MASK) {
- case 0: /* PSN sequence error */
- dev->n_seq_naks++;
- /*
- * Back up to the responder's expected PSN.
- * Note that we might get a NAK in the middle of an
- * RDMA READ response which terminates the RDMA
- * READ.
- */
- ipath_restart_rc(qp, psn);
- break;
-
- case 1: /* Invalid Request */
- status = IB_WC_REM_INV_REQ_ERR;
- dev->n_other_naks++;
- goto class_b;
-
- case 2: /* Remote Access Error */
- status = IB_WC_REM_ACCESS_ERR;
- dev->n_other_naks++;
- goto class_b;
-
- case 3: /* Remote Operation Error */
- status = IB_WC_REM_OP_ERR;
- dev->n_other_naks++;
- class_b:
- ipath_send_complete(qp, wqe, status);
- ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- break;
-
- default:
- /* Ignore other reserved NAK error codes */
- goto reserved;
- }
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- goto bail;
-
- default: /* 2: reserved */
- reserved:
- /* Ignore reserved NAK codes. */
- goto bail;
- }
-
-bail:
- return ret;
-}
-
-/**
- * ipath_rc_rcv_resp - process an incoming RC response packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
- struct ipath_other_headers *ohdr,
- void *data, u32 tlen,
- struct ipath_qp *qp,
- u32 opcode,
- u32 psn, u32 hdrsize, u32 pmtu,
- int header_in_data)
-{
- struct ipath_swqe *wqe;
- enum ib_wc_status status;
- unsigned long flags;
- int diff;
- u32 pad;
- u32 aeth;
- u64 val;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Double check we can process this now that we hold the s_lock. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
- goto ack_done;
-
- /* Ignore invalid responses. */
- if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
- goto ack_done;
-
- /* Ignore duplicate responses. */
- diff = ipath_cmp24(psn, qp->s_last_psn);
- if (unlikely(diff <= 0)) {
- /* Update credits for "ghost" ACKs */
- if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
- if (!header_in_data)
- aeth = be32_to_cpu(ohdr->u.aeth);
- else {
- aeth = be32_to_cpu(((__be32 *) data)[0]);
- data += sizeof(__be32);
- }
- if ((aeth >> 29) == 0)
- ipath_get_credit(qp, aeth);
- }
- goto ack_done;
- }
-
- if (unlikely(qp->s_last == qp->s_tail))
- goto ack_done;
- wqe = get_swqe_ptr(qp, qp->s_last);
- status = IB_WC_SUCCESS;
-
- switch (opcode) {
- case OP(ACKNOWLEDGE):
- case OP(ATOMIC_ACKNOWLEDGE):
- case OP(RDMA_READ_RESPONSE_FIRST):
- if (!header_in_data)
- aeth = be32_to_cpu(ohdr->u.aeth);
- else {
- aeth = be32_to_cpu(((__be32 *) data)[0]);
- data += sizeof(__be32);
- }
- if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
- if (!header_in_data) {
- __be32 *p = ohdr->u.at.atomic_ack_eth;
-
- val = ((u64) be32_to_cpu(p[0]) << 32) |
- be32_to_cpu(p[1]);
- } else
- val = be64_to_cpu(((__be64 *) data)[0]);
- } else
- val = 0;
- if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
- opcode != OP(RDMA_READ_RESPONSE_FIRST))
- goto ack_done;
- hdrsize += 4;
- wqe = get_swqe_ptr(qp, qp->s_last);
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_middle;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /* no AETH, no ACK */
- if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
- dev->n_rdma_seq++;
- if (qp->r_flags & IPATH_R_RDMAR_SEQ)
- goto ack_done;
- qp->r_flags |= IPATH_R_RDMAR_SEQ;
- ipath_restart_rc(qp, qp->s_last_psn + 1);
- goto ack_done;
- }
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- read_middle:
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto ack_len_err;
- if (unlikely(pmtu >= qp->s_rdma_read_len))
- goto ack_len_err;
-
- /* We got a response so update the timeout. */
- spin_lock(&dev->pending_lock);
- if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
- list_move_tail(&qp->timerwait,
- &dev->pending[dev->pending_index]);
- spin_unlock(&dev->pending_lock);
-
- if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
- qp->s_retry = qp->s_retry_cnt;
-
- /*
- * Update the RDMA receive state but do the copy w/o
- * holding the locks and blocking interrupts.
- */
- qp->s_rdma_read_len -= pmtu;
- update_last_psn(qp, psn);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
- goto bail;
-
- case OP(RDMA_READ_RESPONSE_ONLY):
- if (!header_in_data)
- aeth = be32_to_cpu(ohdr->u.aeth);
- else
- aeth = be32_to_cpu(((__be32 *) data)[0]);
- if (!do_rc_ack(qp, aeth, psn, opcode, 0))
- goto ack_done;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 0 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen < (hdrsize + pad + 8)))
- goto ack_len_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- wqe = get_swqe_ptr(qp, qp->s_last);
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_last;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /* ACKs READ req. */
- if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
- dev->n_rdma_seq++;
- if (qp->r_flags & IPATH_R_RDMAR_SEQ)
- goto ack_done;
- qp->r_flags |= IPATH_R_RDMAR_SEQ;
- ipath_restart_rc(qp, qp->s_last_psn + 1);
- goto ack_done;
- }
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 1 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen <= (hdrsize + pad + 8)))
- goto ack_len_err;
- read_last:
- tlen -= hdrsize + pad + 8;
- if (unlikely(tlen != qp->s_rdma_read_len))
- goto ack_len_err;
- if (!header_in_data)
- aeth = be32_to_cpu(ohdr->u.aeth);
- else {
- aeth = be32_to_cpu(((__be32 *) data)[0]);
- data += sizeof(__be32);
- }
- ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
- (void) do_rc_ack(qp, aeth, psn,
- OP(RDMA_READ_RESPONSE_LAST), 0);
- goto ack_done;
- }
-
-ack_op_err:
- status = IB_WC_LOC_QP_OP_ERR;
- goto ack_err;
-
-ack_len_err:
- status = IB_WC_LOC_LEN_ERR;
-ack_err:
- ipath_send_complete(qp, wqe, status);
- ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-ack_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
- return;
-}
-
-/**
- * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
- struct ipath_other_headers *ohdr,
- void *data,
- struct ipath_qp *qp,
- u32 opcode,
- u32 psn,
- int diff,
- int header_in_data)
-{
- struct ipath_ack_entry *e;
- u8 i, prev;
- int old_req;
- unsigned long flags;
-
- if (diff > 0) {
- /*
- * Packet sequence error.
- * A NAK will ACK earlier sends and RDMA writes.
- * Don't queue the NAK if we already sent one.
- */
- if (!qp->r_nak_state) {
- qp->r_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- goto send_ack;
- }
- goto done;
- }
-
- /*
- * Handle a duplicate request. Don't re-execute SEND, RDMA
- * write or atomic op. Don't NAK errors, just silently drop
- * the duplicate request. Note that r_sge, r_len, and
- * r_rcv_len may be in use so don't modify them.
- *
- * We are supposed to ACK the earliest duplicate PSN but we
- * can coalesce an outstanding duplicate ACK. We have to
- * send the earliest so that RDMA reads can be restarted at
- * the requester's expected PSN.
- *
- * First, find where this duplicate PSN falls within the
- * ACKs previously sent.
- */
- psn &= IPATH_PSN_MASK;
- e = NULL;
- old_req = 1;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this now that we hold the s_lock. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
- goto unlock_done;
-
- for (i = qp->r_head_ack_queue; ; i = prev) {
- if (i == qp->s_tail_ack_queue)
- old_req = 0;
- if (i)
- prev = i - 1;
- else
- prev = IPATH_MAX_RDMA_ATOMIC;
- if (prev == qp->r_head_ack_queue) {
- e = NULL;
- break;
- }
- e = &qp->s_ack_queue[prev];
- if (!e->opcode) {
- e = NULL;
- break;
- }
- if (ipath_cmp24(psn, e->psn) >= 0) {
- if (prev == qp->s_tail_ack_queue)
- old_req = 0;
- break;
- }
- }
- switch (opcode) {
- case OP(RDMA_READ_REQUEST): {
- struct ib_reth *reth;
- u32 offset;
- u32 len;
-
- /*
- * If we didn't find the RDMA read request in the ack queue,
- * or the send tasklet is already backed up to send an
- * earlier entry, we can ignore this request.
- */
- if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
- goto unlock_done;
- /* RETH comes after BTH */
- if (!header_in_data)
- reth = &ohdr->u.rc.reth;
- else {
- reth = (struct ib_reth *)data;
- data += sizeof(*reth);
- }
- /*
- * Address range must be a subset of the original
- * request and start on pmtu boundaries.
- * We reuse the old ack_queue slot since the requester
- * should not back up and request an earlier PSN for the
- * same request.
- */
- offset = ((psn - e->psn) & IPATH_PSN_MASK) *
- ib_mtu_enum_to_int(qp->path_mtu);
- len = be32_to_cpu(reth->length);
- if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
- goto unlock_done;
- if (len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- ok = ipath_rkey_ok(qp, &e->rdma_sge,
- len, vaddr, rkey,
- IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto unlock_done;
- } else {
- e->rdma_sge.sg_list = NULL;
- e->rdma_sge.num_sge = 0;
- e->rdma_sge.sge.mr = NULL;
- e->rdma_sge.sge.vaddr = NULL;
- e->rdma_sge.sge.length = 0;
- e->rdma_sge.sge.sge_length = 0;
- }
- e->psn = psn;
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- /*
- * If we didn't find the atomic request in the ack queue
- * or the send tasklet is already backed up to send an
- * earlier entry, we can ignore this request.
- */
- if (!e || e->opcode != (u8) opcode || old_req)
- goto unlock_done;
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- default:
- if (old_req)
- goto unlock_done;
- /*
- * Resend the most recent ACK if this request is
- * after all the previous RDMA reads and atomics.
- */
- if (i == qp->r_head_ack_queue) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->r_psn - 1;
- goto send_ack;
- }
- /*
-		 * Try to send a simple ACK to work around a Mellanox bug
-		 * that does not accept an RDMA read response or atomic
-		 * response as an ACK for earlier SENDs or RDMA writes.
- */
- if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
- !(qp->s_flags & IPATH_S_ACK_PENDING) &&
- qp->s_ack_state == OP(ACKNOWLEDGE)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
- goto send_ack;
- }
- /*
- * Resend the RDMA read or atomic op which
- * ACKs this duplicate request.
- */
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_tail_ack_queue = i;
- break;
- }
- qp->r_nak_state = 0;
- ipath_schedule_send(qp);
-
-unlock_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return 1;
-
-send_ack:
- return 0;
-}
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
-{
- unsigned long flags;
- int lastwqe;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = ipath_error_qp(qp, err);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
-}
-
-static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
-{
- unsigned next;
-
- next = n + 1;
- if (next > IPATH_MAX_RDMA_ATOMIC)
- next = 0;
- if (n == qp->s_tail_ack_queue) {
- qp->s_tail_ack_queue = next;
- qp->s_ack_state = OP(ACKNOWLEDGE);
- }
-}
-
-/**
- * ipath_rc_rcv - process an incoming RC packet
- * @dev: the device this packet came in on
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from ipath_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
- struct ipath_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
- int diff;
- struct ib_reth *reth;
- int header_in_data;
- unsigned long flags;
-
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
- goto done;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- psn = be32_to_cpu(ohdr->bth[2]);
- header_in_data = 0;
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- /*
- * The header with GRH is 60 bytes and the core driver sets
- * the eager header buffer size to 56 bytes so the last 4
-		 * bytes of the BTH header (PSN) are in the data buffer.
- */
- header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
- if (header_in_data) {
- psn = be32_to_cpu(((__be32 *) data)[0]);
- data += sizeof(__be32);
- } else
- psn = be32_to_cpu(ohdr->bth[2]);
- }
-
- /*
- * Process responses (ACKs) before anything else. Note that the
- * packet sequence number will be for something in the send work
- * queue rather than the expected receive packet sequence number.
- * In other words, this QP is the requester.
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
- hdrsize, pmtu, header_in_data);
- goto done;
- }
-
- /* Compute 24 bits worth of difference. */
- diff = ipath_cmp24(psn, qp->r_psn);
- if (unlikely(diff)) {
- if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
- psn, diff, header_in_data))
- goto done;
- goto send_ack;
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- default:
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- goto nack_inv;
- /*
- * Note that it is up to the requester to not send a new
- * RDMA read or atomic operation before receiving an ACK
- * for the previous operation.
- */
- break;
- }
-
- memset(&wc, 0, sizeof wc);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- if (!ipath_get_rwqe(qp, 0))
- goto rnr_nak;
- qp->r_rcv_len = 0;
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- case OP(RDMA_WRITE_MIDDLE):
- send_middle:
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto nack_inv;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto nack_inv;
- ipath_copy_sge(&qp->r_sge, data, pmtu);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- /* consume RWQE */
- if (!ipath_get_rwqe(qp, 1))
- goto rnr_nak;
- goto send_last_imm;
-
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- if (!ipath_get_rwqe(qp, 0))
- goto rnr_nak;
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto send_last;
- /* FALLTHROUGH */
- case OP(SEND_LAST_WITH_IMMEDIATE):
- send_last_imm:
- if (header_in_data) {
- wc.ex.imm_data = *(__be32 *) data;
- data += sizeof(__be32);
- } else {
- /* Immediate data comes after BTH */
- wc.ex.imm_data = ohdr->u.imm_data;
- }
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- /* FALLTHROUGH */
- case OP(SEND_LAST):
- case OP(RDMA_WRITE_LAST):
- send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto nack_inv;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto nack_inv;
- ipath_copy_sge(&qp->r_sge, data, tlen);
- qp->r_msn++;
- if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
- break;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- /* Signal completion event if the solicited bit is set. */
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(1 << 23)) != 0);
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE)))
- goto nack_inv;
- /* consume RWQE */
- /* RETH comes after BTH */
- if (!header_in_data)
- reth = &ohdr->u.rc.reth;
- else {
- reth = (struct ib_reth *)data;
- data += sizeof(*reth);
- }
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = ipath_rkey_ok(qp, &qp->r_sge,
- qp->r_len, vaddr, rkey,
- IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto nack_acc;
- } else {
- qp->r_sge.sg_list = NULL;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_FIRST))
- goto send_middle;
- else if (opcode == OP(RDMA_WRITE_ONLY))
- goto send_last;
- if (!ipath_get_rwqe(qp, 1))
- goto rnr_nak;
- goto send_last_imm;
-
- case OP(RDMA_READ_REQUEST): {
- struct ipath_ack_entry *e;
- u32 len;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_READ)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- if (next > IPATH_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this while holding the s_lock. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
- goto unlock;
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- ipath_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- /* RETH comes after BTH */
- if (!header_in_data)
- reth = &ohdr->u.rc.reth;
- else {
- reth = (struct ib_reth *)data;
- data += sizeof(*reth);
- }
- len = be32_to_cpu(reth->length);
- if (len) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
- rkey, IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto nack_acc_unlck;
- /*
- * Update the next expected PSN. We add 1 later
- * below, so only add the remainder here.
- */
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
- } else {
- e->rdma_sge.sg_list = NULL;
- e->rdma_sge.num_sge = 0;
- e->rdma_sge.sge.mr = NULL;
- e->rdma_sge.sge.vaddr = NULL;
- e->rdma_sge.sge.length = 0;
- e->rdma_sge.sge.sge_length = 0;
- }
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- /*
- * We need to increment the MSN here instead of when we
- * finish sending the result since a duplicate request would
- * increment it more than once.
- */
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- ipath_schedule_send(qp);
-
- goto unlock;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- struct ib_atomic_eth *ateth;
- struct ipath_ack_entry *e;
- u64 vaddr;
- atomic64_t *maddr;
- u64 sdata;
- u32 rkey;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- if (next > IPATH_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- /* Double check we can process this while holding the s_lock. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
- goto unlock;
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- ipath_update_ack_queue(qp, next);
- }
- if (!header_in_data)
- ateth = &ohdr->u.atomic_eth;
- else
- ateth = (struct ib_atomic_eth *)data;
- vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
- be32_to_cpu(ateth->vaddr[1]);
- if (unlikely(vaddr & (sizeof(u64) - 1)))
- goto nack_inv_unlck;
- rkey = be32_to_cpu(ateth->rkey);
- /* Check rkey & NAK */
- if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
- sizeof(u64), vaddr, rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc_unlck;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = be64_to_cpu(ateth->swap_data);
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- e->atomic_data = (opcode == OP(FETCH_ADD)) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- be64_to_cpu(ateth->compare_data),
- sdata);
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn & IPATH_PSN_MASK;
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- ipath_schedule_send(qp);
-
- goto unlock;
- }
-
- default:
- /* NAK unknown opcodes. */
- goto nack_inv;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_ack_psn = psn;
- qp->r_nak_state = 0;
- /* Send an ACK if requested or required. */
- if (psn & (1 << 31))
- goto send_ack;
- goto done;
-
-rnr_nak:
- qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
- qp->r_ack_psn = qp->r_psn;
- goto send_ack;
-
-nack_inv_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
- ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_INVALID_REQUEST;
- qp->r_ack_psn = qp->r_psn;
- goto send_ack;
-
-nack_acc_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
- ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
- qp->r_ack_psn = qp->r_psn;
-send_ack:
- send_rc_ack(qp);
- goto done;
-
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_registers.h b/drivers/staging/rdma/ipath/ipath_registers.h
deleted file mode 100644
index 8f44d0cf3833..000000000000
--- a/drivers/staging/rdma/ipath/ipath_registers.h
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_REGISTERS_H
-#define _IPATH_REGISTERS_H
-
-/*
- * This file should only be included by kernel source, and by the diags. It
- * defines the registers, and their contents, for InfiniPath chips.
- */
-
-/*
- * These are the InfiniPath register and buffer bit definitions
- * that are visible to software and needed only by the kernel
- * and diag code. A few that are visible to protocol and user
- * code are in ipath_common.h. Some bits are specific
- * to a given chip implementation and have been moved to the
- * chip-specific source files.
- */
-
-/* kr_revision bits */
-#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
-#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
-#define INFINIPATH_R_ARCH_MASK 0xFF
-#define INFINIPATH_R_ARCH_SHIFT 16
-#define INFINIPATH_R_SOFTWARE_MASK 0xFF
-#define INFINIPATH_R_SOFTWARE_SHIFT 24
-#define INFINIPATH_R_BOARDID_MASK 0xFF
-#define INFINIPATH_R_BOARDID_SHIFT 32
-
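These revision fields are meant to be read out of the 64-bit kr_revision register using the mask/shift pairs above; a minimal sketch of the intended decode (the helper name is invented for illustration):

/* Sketch only: extract the major chip revision using the masks above. */
static inline u32 chiprev_major_sketch(u64 revision)
{
	return (u32) ((revision >> INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
		      INFINIPATH_R_CHIPREVMAJOR_MASK);
}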
-/* kr_control bits */
-#define INFINIPATH_C_FREEZEMODE 0x00000002
-#define INFINIPATH_C_LINKENABLE 0x00000004
-
-/* kr_sendctrl bits */
-#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
-#define INFINIPATH_S_UPDTHRESH_SHIFT 24
-#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
-
-#define IPATH_S_ABORT 0
-#define IPATH_S_PIOINTBUFAVAIL 1
-#define IPATH_S_PIOBUFAVAILUPD 2
-#define IPATH_S_PIOENABLE 3
-#define IPATH_S_SDMAINTENABLE 9
-#define IPATH_S_SDMASINGLEDESCRIPTOR 10
-#define IPATH_S_SDMAENABLE 11
-#define IPATH_S_SDMAHALT 12
-#define IPATH_S_DISARM 31
-
-#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
-#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
-#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
-#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
-#define INFINIPATH_S_SDMAINTENABLE (1U << IPATH_S_SDMAINTENABLE)
-#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
- (1U << IPATH_S_SDMASINGLEDESCRIPTOR)
-#define INFINIPATH_S_SDMAENABLE (1U << IPATH_S_SDMAENABLE)
-#define INFINIPATH_S_SDMAHALT (1U << IPATH_S_SDMAHALT)
-#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
-
-/* kr_rcvctrl bits that are the same on multiple chips */
-#define INFINIPATH_R_PORTENABLE_SHIFT 0
-#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_SDMAINT 0x8000000000000000ULL
-#define INFINIPATH_I_SDMADISABLED 0x4000000000000000ULL
-#define INFINIPATH_I_ERROR 0x0000000080000000ULL
-#define INFINIPATH_I_SPIOSENT 0x0000000040000000ULL
-#define INFINIPATH_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define INFINIPATH_I_GPIO 0x0000000010000000ULL
-#define INFINIPATH_I_JINT 0x0000000004000000ULL
-
-/* kr_errorstatus, kr_errorclear, kr_errormask bits */
-#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
-#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
-#define INFINIPATH_E_RICRC 0x0000000000000004ULL
-#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
-#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
-#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
-#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
-#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
-#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
-#define INFINIPATH_E_REBP 0x0000000000000200ULL
-#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
-#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
-#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
-#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
-#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
-#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
-#define INFINIPATH_E_RHDR 0x0000000000010000ULL
-#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
-#define INFINIPATH_E_SENDSPECIALTRIGGER 0x0000000008000000ULL
-#define INFINIPATH_E_SDMADISABLED 0x0000000010000000ULL
-#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
-#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
-#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
-#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
-#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
-#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
-#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
-#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
-#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
-#define INFINIPATH_E_SENDBUFMISUSE 0x0000004000000000ULL
-#define INFINIPATH_E_SDMAGENMISMATCH 0x0000008000000000ULL
-#define INFINIPATH_E_SDMAOUTOFBOUND 0x0000010000000000ULL
-#define INFINIPATH_E_SDMATAILOUTOFBOUND 0x0000020000000000ULL
-#define INFINIPATH_E_SDMABASE 0x0000040000000000ULL
-#define INFINIPATH_E_SDMA1STDESC 0x0000080000000000ULL
-#define INFINIPATH_E_SDMARPYTAG 0x0000100000000000ULL
-#define INFINIPATH_E_SDMADWEN 0x0000200000000000ULL
-#define INFINIPATH_E_SDMAMISSINGDW 0x0000400000000000ULL
-#define INFINIPATH_E_SDMAUNEXPDATA 0x0000800000000000ULL
-#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
-#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
-#define INFINIPATH_E_RESET 0x0004000000000000ULL
-#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
-#define INFINIPATH_E_SDMADESCADDRMISALIGN 0x0010000000000000ULL
-#define INFINIPATH_E_INVALIDEEPCMD 0x0020000000000000ULL
-
-/*
- * this is used to print "common" packet errors only when the
- * __IPATH_ERRPKTDBG bit is set in ipath_debug.
- */
-#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
- | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
- | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
- | INFINIPATH_E_REBP )
-
-/* Convenience for decoding Send DMA errors */
-#define INFINIPATH_E_SDMAERRS ( \
- INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
- INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
- INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
- INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
- INFINIPATH_E_SDMAUNEXPDATA | \
- INFINIPATH_E_SDMADESCADDRMISALIGN | \
- INFINIPATH_E_SDMADISABLED | \
- INFINIPATH_E_SENDBUFMISUSE)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
- * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
- * bit 4: flag buffer, 5: datainfo, 6: header info */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-/* txe mem parity errors (shift by INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF 0x1ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC 0x2ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOLAUNCHFIFO 0x4ULL
-/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x04ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
-/* waldo specific -- find the rest in ipath_6110.c */
-#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
-/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
-#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
-
-/* kr_hwdiagctrl bits */
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR 0x0000000400000000ULL
-#define INFINIPATH_DC_COUNTERDISABLE 0x1000000000000000ULL
-#define INFINIPATH_DC_COUNTERWREN 0x2000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-
-/* kr_ibcctrl bits */
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
-#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
-#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
-#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
-#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
-#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
-#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
-#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
-#define INFINIPATH_IBCC_LOOPBACK 0x8000000000000000ULL
-#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
-
-/* kr_ibcstatus bits */
-#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
-#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
-
-#define INFINIPATH_IBCS_TXREADY 0x40000000
-#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
-/* link training states (shift by
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
-#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
-#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
-#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
-#define INFINIPATH_IBCS_LT_STATE_POLLQUIET 0x03
-#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY 0x04
-#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET 0x05
-#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE 0x08
-#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG 0x09
-#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT 0x0a
-#define INFINIPATH_IBCS_LT_STATE_CFGIDLE 0x0b
-#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
-#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
-#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
-/* link state machine states (shift by ibcs_ls_shift) */
-#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
-#define INFINIPATH_IBCS_L_STATE_INIT 0x1
-#define INFINIPATH_IBCS_L_STATE_ARM 0x2
-#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
-#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
-
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
-#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
-#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
-
-/* kr_extctrl bits */
-#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
-#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
-#define INFINIPATH_EXTC_SERDESENABLE 0x80000000ULL
-#define INFINIPATH_EXTC_SERDESCONNECT 0x40000000ULL
-#define INFINIPATH_EXTC_SERDESENTRUNKING 0x20000000ULL
-#define INFINIPATH_EXTC_SERDESDISRXFIFO 0x10000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK1 0x08000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK2 0x04000000ULL
-#define INFINIPATH_EXTC_SERDESENENCDEC 0x02000000ULL
-#define INFINIPATH_EXTC_LED1SECPORT_ON 0x00000020ULL
-#define INFINIPATH_EXTC_LED2SECPORT_ON 0x00000010ULL
-#define INFINIPATH_EXTC_LED1PRIPORT_ON 0x00000008ULL
-#define INFINIPATH_EXTC_LED2PRIPORT_ON 0x00000004ULL
-#define INFINIPATH_EXTC_LEDGBLOK_ON 0x00000002ULL
-#define INFINIPATH_EXTC_LEDGBLERR_OFF 0x00000001ULL
-
-/* kr_partitionkey bits */
-#define INFINIPATH_PKEY_SIZE 16
-#define INFINIPATH_PKEY_MASK 0xFFFF
-#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
-
-/* kr_serdesconfig0 bits */
-#define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overall reset bits */
-#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
-/* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL
-/* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
-/* L1 Power down; use with RXDETECT, otherwise not used on IB side */
-#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
-
-/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
-#define INFINIPATH_XGXS_RX_POL_SHIFT 19
-#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
-
-
-/*
- * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
- * PIO send buffers. This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define IPATH_PIO_MAXIBHDR 128
-
-typedef u64 ipath_err_t;
-
-/* The following change with the type of device, so
- * they need to be part of the ipath_devdata struct, or
- * we could have problems plugging in devices of
- * different types (e.g. one HT, one PCIE)
- * in one system, to be managed by one driver.
- * On the other hand, this file may also be included
- * by other code, so leave the declarations here
- * temporarily. Minor footprint issue if common-model
- * linker used, none if C89+ linker used.
- */
-
-/* mask of defined bits for various registers */
-extern u64 infinipath_i_bitsextant;
-extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
-
-/* masks that are different in various chips, or only exist in some chips */
-extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
-
-/*
- * These are the infinipath general register numbers (not offsets).
- * The kernel registers are used directly, those beyond the kernel
- * registers are calculated from one of the base registers. The use of
- * an integer type doesn't allow type-checking as thorough as, say,
- * an enum but allows for better hiding of chip differences.
- */
-typedef const u16 ipath_kreg, /* infinipath general registers */
- ipath_creg, /* infinipath counter registers */
- ipath_sreg; /* kernel-only, infinipath send registers */
-
-/*
- * These are the chip registers common to all infinipath chips, and
- * used both by the kernel and the diagnostics or other user code.
- * They are all implemented such that 64 bit accesses work.
- * Some implement no more than 32 bits. Because 64 bit reads
- * require 2 HT cmds on opteron, we access those with 32 bit
- * reads for efficiency (they are written as 64 bits, since
- * the extra 32 bits are nearly free on writes, and it slightly reduces
- * complexity). The rest are all accessed as 64 bits.
- */
-struct ipath_kregs {
- /* These are the 32 bit group */
- ipath_kreg kr_control;
- ipath_kreg kr_counterregbase;
- ipath_kreg kr_intmask;
- ipath_kreg kr_intstatus;
- ipath_kreg kr_pagealign;
- ipath_kreg kr_portcnt;
- ipath_kreg kr_rcvtidbase;
- ipath_kreg kr_rcvtidcnt;
- ipath_kreg kr_rcvegrbase;
- ipath_kreg kr_rcvegrcnt;
- ipath_kreg kr_scratch;
- ipath_kreg kr_sendctrl;
- ipath_kreg kr_sendpiobufbase;
- ipath_kreg kr_sendpiobufcnt;
- ipath_kreg kr_sendpiosize;
- ipath_kreg kr_sendregbase;
- ipath_kreg kr_userregbase;
- /* These are the 64 bit group */
- ipath_kreg kr_debugport;
- ipath_kreg kr_debugportselect;
- ipath_kreg kr_errorclear;
- ipath_kreg kr_errormask;
- ipath_kreg kr_errorstatus;
- ipath_kreg kr_extctrl;
- ipath_kreg kr_extstatus;
- ipath_kreg kr_gpio_clear;
- ipath_kreg kr_gpio_mask;
- ipath_kreg kr_gpio_out;
- ipath_kreg kr_gpio_status;
- ipath_kreg kr_hwdiagctrl;
- ipath_kreg kr_hwerrclear;
- ipath_kreg kr_hwerrmask;
- ipath_kreg kr_hwerrstatus;
- ipath_kreg kr_ibcctrl;
- ipath_kreg kr_ibcstatus;
- ipath_kreg kr_intblocked;
- ipath_kreg kr_intclear;
- ipath_kreg kr_interruptconfig;
- ipath_kreg kr_mdio;
- ipath_kreg kr_partitionkey;
- ipath_kreg kr_rcvbthqp;
- ipath_kreg kr_rcvbufbase;
- ipath_kreg kr_rcvbufsize;
- ipath_kreg kr_rcvctrl;
- ipath_kreg kr_rcvhdrcnt;
- ipath_kreg kr_rcvhdrentsize;
- ipath_kreg kr_rcvhdrsize;
- ipath_kreg kr_rcvintmembase;
- ipath_kreg kr_rcvintmemsize;
- ipath_kreg kr_revision;
- ipath_kreg kr_sendbuffererror;
- ipath_kreg kr_sendpioavailaddr;
- ipath_kreg kr_serdesconfig0;
- ipath_kreg kr_serdesconfig1;
- ipath_kreg kr_serdesstatus;
- ipath_kreg kr_txintmembase;
- ipath_kreg kr_txintmemsize;
- ipath_kreg kr_xgxsconfig;
- ipath_kreg kr_ibpllcfg;
- /* use these two (and the following N ports) only with
- * ipath_k*_kreg64_port(); not *kreg64() */
- ipath_kreg kr_rcvhdraddr;
- ipath_kreg kr_rcvhdrtailaddr;
-
- /* remaining registers are not present on all types of infinipath
- chips */
- ipath_kreg kr_rcvpktledcnt;
- ipath_kreg kr_pcierbuftestreg0;
- ipath_kreg kr_pcierbuftestreg1;
- ipath_kreg kr_pcieq0serdesconfig0;
- ipath_kreg kr_pcieq0serdesconfig1;
- ipath_kreg kr_pcieq0serdesstatus;
- ipath_kreg kr_pcieq1serdesconfig0;
- ipath_kreg kr_pcieq1serdesconfig1;
- ipath_kreg kr_pcieq1serdesstatus;
- ipath_kreg kr_hrtbt_guid;
- ipath_kreg kr_ibcddrctrl;
- ipath_kreg kr_ibcddrstatus;
- ipath_kreg kr_jintreload;
-
- /* send dma related regs */
- ipath_kreg kr_senddmabase;
- ipath_kreg kr_senddmalengen;
- ipath_kreg kr_senddmatail;
- ipath_kreg kr_senddmahead;
- ipath_kreg kr_senddmaheadaddr;
- ipath_kreg kr_senddmabufmask0;
- ipath_kreg kr_senddmabufmask1;
- ipath_kreg kr_senddmabufmask2;
- ipath_kreg kr_senddmastatus;
-
- /* SerDes related regs (IBA7220-only) */
- ipath_kreg kr_ibserdesctrl;
- ipath_kreg kr_ib_epbacc;
- ipath_kreg kr_ib_epbtrans;
- ipath_kreg kr_pcie_epbacc;
- ipath_kreg kr_pcie_epbtrans;
- ipath_kreg kr_ib_ddsrxeq;
-};
-
-struct ipath_cregs {
- ipath_creg cr_badformatcnt;
- ipath_creg cr_erricrccnt;
- ipath_creg cr_errlinkcnt;
- ipath_creg cr_errlpcrccnt;
- ipath_creg cr_errpkey;
- ipath_creg cr_errrcvflowctrlcnt;
- ipath_creg cr_err_rlencnt;
- ipath_creg cr_errslencnt;
- ipath_creg cr_errtidfull;
- ipath_creg cr_errtidvalid;
- ipath_creg cr_errvcrccnt;
- ipath_creg cr_ibstatuschange;
- ipath_creg cr_intcnt;
- ipath_creg cr_invalidrlencnt;
- ipath_creg cr_invalidslencnt;
- ipath_creg cr_lbflowstallcnt;
- ipath_creg cr_iblinkdowncnt;
- ipath_creg cr_iblinkerrrecovcnt;
- ipath_creg cr_ibsymbolerrcnt;
- ipath_creg cr_pktrcvcnt;
- ipath_creg cr_pktrcvflowctrlcnt;
- ipath_creg cr_pktsendcnt;
- ipath_creg cr_pktsendflowcnt;
- ipath_creg cr_portovflcnt;
- ipath_creg cr_rcvebpcnt;
- ipath_creg cr_rcvovflcnt;
- ipath_creg cr_rxdroppktcnt;
- ipath_creg cr_senddropped;
- ipath_creg cr_sendstallcnt;
- ipath_creg cr_sendunderruncnt;
- ipath_creg cr_unsupvlcnt;
- ipath_creg cr_wordrcvcnt;
- ipath_creg cr_wordsendcnt;
- ipath_creg cr_vl15droppedpktcnt;
- ipath_creg cr_rxotherlocalphyerrcnt;
- ipath_creg cr_excessbufferovflcnt;
- ipath_creg cr_locallinkintegrityerrcnt;
- ipath_creg cr_rxvlerrcnt;
- ipath_creg cr_rxdlidfltrcnt;
- ipath_creg cr_psstat;
- ipath_creg cr_psstart;
- ipath_creg cr_psinterval;
- ipath_creg cr_psrcvdatacount;
- ipath_creg cr_psrcvpktscount;
- ipath_creg cr_psxmitdatacount;
- ipath_creg cr_psxmitpktscount;
- ipath_creg cr_psxmitwaitcount;
-};
-
-#endif /* _IPATH_REGISTERS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
deleted file mode 100644
index e541a01f1f61..000000000000
--- a/drivers/staging/rdma/ipath/ipath_ruc.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/*
- * Convert the AETH RNR timeout code into the number of milliseconds.
- */
-const u32 ib_ipath_rnr_table[32] = {
- 656, /* 0 */
- 1, /* 1 */
- 1, /* 2 */
- 1, /* 3 */
- 1, /* 4 */
- 1, /* 5 */
- 1, /* 6 */
- 1, /* 7 */
- 1, /* 8 */
- 1, /* 9 */
- 1, /* A */
- 1, /* B */
- 1, /* C */
- 1, /* D */
- 2, /* E */
- 2, /* F */
- 3, /* 10 */
- 4, /* 11 */
- 6, /* 12 */
- 8, /* 13 */
- 11, /* 14 */
- 16, /* 15 */
- 21, /* 16 */
- 31, /* 17 */
- 41, /* 18 */
- 62, /* 19 */
- 82, /* 1A */
- 123, /* 1B */
- 164, /* 1C */
- 246, /* 1D */
- 328, /* 1E */
- 492 /* 1F */
-};
-
-/**
- * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
- * @qp: the QP
- *
- * Called with the QP s_lock held and interrupts disabled.
- * XXX Use a simple list for now. We might need a priority
- * queue if we have lots of QPs waiting for RNR timeouts
- * but that should be rare.
- */
-void ipath_insert_rnr_queue(struct ipath_qp *qp)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
- /* We already did a spin_lock_irqsave(), so just use spin_lock */
- spin_lock(&dev->pending_lock);
- if (list_empty(&dev->rnrwait))
- list_add(&qp->timerwait, &dev->rnrwait);
- else {
- struct list_head *l = &dev->rnrwait;
- struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
- timerwait);
-
- while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
- qp->s_rnr_timeout -= nqp->s_rnr_timeout;
- l = l->next;
- if (l->next == &dev->rnrwait) {
- nqp = NULL;
- break;
- }
- nqp = list_entry(l->next, struct ipath_qp,
- timerwait);
- }
- if (nqp)
- nqp->s_rnr_timeout -= qp->s_rnr_timeout;
- list_add(&qp->timerwait, l);
- }
- spin_unlock(&dev->pending_lock);
-}
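The queue above is a delta list: each waiting QP stores its RNR timeout relative to the entry in front of it, so the periodic timer only has to decrement and expire the head. Below is a minimal, self-contained sketch of the same idea; the names (delta_node, delta_insert) are illustrative only and not part of the driver.

#include <stdio.h>

struct delta_node {
	unsigned int timeout;		/* delay relative to the previous node */
	struct delta_node *next;
};

/* Keep the list sorted by absolute expiry while storing relative delays. */
static void delta_insert(struct delta_node **head, struct delta_node *n)
{
	struct delta_node **pp = head;

	while (*pp && n->timeout >= (*pp)->timeout) {
		n->timeout -= (*pp)->timeout;	/* absorb predecessor's delta */
		pp = &(*pp)->next;
	}
	if (*pp)
		(*pp)->timeout -= n->timeout;	/* successor becomes relative to n */
	n->next = *pp;
	*pp = n;
}

int main(void)
{
	struct delta_node a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
	struct delta_node *head = NULL;

	delta_insert(&head, &a);		/* list: 30 */
	delta_insert(&head, &b);		/* list: 10, 20 */
	delta_insert(&head, &c);		/* list: 10, 10, 10 */
	for (struct delta_node *p = head; p; p = p->next)
		printf("%u ", p->timeout);	/* prints "10 10 10" */
	printf("\n");
	return 0;
}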
-
-/**
- * ipath_init_sge - Validate a RWQE and fill in the SGE state
- * @qp: the QP
- *
- * Return 1 if OK.
- */
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
- u32 *lengthp, struct ipath_sge_state *ss)
-{
- int i, j, ret;
- struct ib_wc wc;
-
- *lengthp = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
- &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
- goto bad_lkey;
- *lengthp += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ret = 1;
- goto bail;
-
-bad_lkey:
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return 0 if no RWQE is available, otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct ipath_rq *rq;
- struct ipath_rwq *wq;
- struct ipath_srq *srq;
- struct ipath_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- do {
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = get_rwqe_ptr(rq, tail);
- if (++tail >= rq->size)
- tail = 0;
- if (wr_id_only)
- break;
- qp->r_sge.sg_list = qp->r_sg_list;
- } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
- qp->r_wr_id = wqe->wr_id;
- wq->tail = tail;
-
- ret = 1;
- set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
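A worked example of the SRQ-limit check above, with illustrative numbers: if rq->size is 8, wq->head is 2 and the (already advanced) tail is 6, then head < tail and n = 2 + 8 - 6 = 4 WQEs remain posted; if head were 6 and tail 2, n = 6 - 2 = 4. Only when that count falls below srq->limit is IB_EVENT_SRQ_LIMIT_REACHED delivered, and the limit is disarmed (set to 0) so the event fires once per arming.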
-
-/**
- * ipath_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from ipath_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ipath_ruc_loopback(struct ipath_qp *sqp)
-{
- struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
- struct ipath_qp *qp;
- struct ipath_swqe *wqe;
- struct ipath_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
-
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
- !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= IPATH_S_BUSY;
-
-again:
- if (sqp->s_last == sqp->s_head)
- goto clr_busy;
- wqe = get_swqe_ptr(sqp, sqp->s_last);
-
-	/* Return if it is not OK to start a new work request. */
- if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
- dev->n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof wc);
- send_status = IB_WC_SUCCESS;
-
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- if (!ipath_get_rwqe(qp, 0))
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- if (!ipath_get_rwqe(qp, 1))
- goto rnr_nak;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- if (wqe->length == 0)
- break;
- if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
-
- if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- ipath_send_complete(sqp, wqe, send_status);
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
- goto clr_busy;
- sqp->s_flags |= IPATH_S_WAITING;
- dev->n_rnr_naks++;
- sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
- ipath_insert_rnr_queue(sqp);
- goto clr_busy;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- ipath_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ipath_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~IPATH_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~IPATH_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- if (qp && atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
-{
- if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
- qp->ibqp.qp_type == IB_QPT_SMI) {
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
-}
-
-/**
- * ipath_no_bufs_available - tell the layer driver we need buffers
- * @qp: the QP that caused the problem
- * @dev: the device we ran out of buffers on
- *
- * Called when we run out of PIO buffers.
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int ipath_no_bufs_available(struct ipath_qp *qp,
- struct ipath_ibdev *dev)
-{
- unsigned long flags;
- int ret = 1;
-
- /*
- * Note that as soon as want_buffer() is called and
- * possibly before it returns, ipath_ib_piobufavail()
- * could be called. Therefore, put QP on the piowait list before
- * enabling the PIO avail interrupt.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
- dev->n_piowait++;
- qp->s_flags |= IPATH_S_WAITING;
- qp->s_flags &= ~IPATH_S_BUSY;
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->piowait))
- list_add_tail(&qp->piowait, &dev->piowait);
- spin_unlock(&dev->pending_lock);
- } else
- ret = 0;
- spin_unlock_irqrestore(&qp->s_lock, flags);
- if (ret)
- want_buffer(dev->dd, qp);
- return ret;
-}
-
-/**
- * ipath_make_grh - construct a GRH header
- * @dev: a pointer to the ipath device
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
- hdr->version_tclass_flow =
- cpu_to_be32((6 << 28) |
- (grh->traffic_class << 20) |
- grh->flow_label);
- hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- hdr->next_hdr = 0x1B;
- hdr->hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = dev->gid_prefix;
- hdr->sgid.global.interface_id = dev->dd->ipath_guid;
- hdr->dgid = grh->dgid;
-
- /* GRH header size in 32-bit words. */
- return sizeof(struct ib_grh) / sizeof(u32);
-}
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
- struct ipath_other_headers *ohdr,
- u32 bth0, u32 bth2)
-{
- u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
-
- /* Construct the header. */
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
- lrh0 = IPATH_LRH_BTH;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
- &qp->remote_ah_attr.grh,
- qp->s_hdrwords, nwords);
- lrh0 = IPATH_LRH_GRH;
- }
- lrh0 |= qp->remote_ah_attr.sl << 4;
- qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
- qp->remote_ah_attr.src_path_bits);
- bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
- bth0 |= extra_bytes << 20;
- ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(bth2);
-}
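The pad arithmetic in ipath_make_ruc_header() above relies on two's complement: -s_cur_size & 3 is the number of bytes needed to round the payload up to a 4-byte boundary. For example, a 5-byte payload gives extra_bytes = (-5) & 3 = 3 and nwords = (5 + 3) >> 2 = 2 dwords, while an 8-byte payload gives extra_bytes = 0 and nwords = 2; the same pad count is then carried in BTH0 bits [21:20] via extra_bytes << 20.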
-
-/**
- * ipath_do_send - perform a send on a QP
- * @data: contains a pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void ipath_do_send(unsigned long data)
-{
- struct ipath_qp *qp = (struct ipath_qp *)data;
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- int (*make_req)(struct ipath_qp *qp);
- unsigned long flags;
-
- if ((qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC) &&
- qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
- ipath_ruc_loopback(qp);
- goto bail;
- }
-
- if (qp->ibqp.qp_type == IB_QPT_RC)
- make_req = ipath_make_rc_req;
- else if (qp->ibqp.qp_type == IB_QPT_UC)
- make_req = ipath_make_uc_req;
- else
- make_req = ipath_make_ud_req;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
- !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- goto bail;
- }
-
- qp->s_flags |= IPATH_S_BUSY;
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
-again:
- /* Check for a constructed packet to be sent. */
- if (qp->s_hdrwords != 0) {
- /*
- * If no PIO bufs are available, return. An interrupt will
- * call ipath_ib_piobufavail() when one is available.
- */
- if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
- qp->s_cur_sge, qp->s_cur_size)) {
- if (ipath_no_bufs_available(qp, dev))
- goto bail;
- }
- dev->n_unicast_xmit++;
- /* Record that we sent the packet and s_hdr is empty. */
- qp->s_hdrwords = 0;
- }
-
- if (make_req(qp))
- goto again;
-
-bail:;
-}
-
-/*
- * This should be called with s_lock held.
- */
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
- return;
-
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof wc);
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
-
- old_last = last = qp->s_last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_sdma.c b/drivers/staging/rdma/ipath/ipath_sdma.c
deleted file mode 100644
index 1ffc06abf9da..000000000000
--- a/drivers/staging/rdma/ipath/ipath_sdma.c
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
-
-static void vl15_watchdog_enq(struct ipath_devdata *dd)
-{
- /* ipath_sdma_lock must already be held */
- if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
- unsigned long interval = (HZ + 19) / 20;
- dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
- add_timer(&dd->ipath_sdma_vl15_timer);
- }
-}
-
-static void vl15_watchdog_deq(struct ipath_devdata *dd)
-{
- /* ipath_sdma_lock must already be held */
- if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
- unsigned long interval = (HZ + 19) / 20;
- mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
- } else {
- del_timer(&dd->ipath_sdma_vl15_timer);
- }
-}
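The interval (HZ + 19) / 20 used by both helpers above is ceil(HZ / 20), i.e. one twentieth of a second rounded up to whole jiffies, so the VL15 watchdog re-arms roughly every 50 ms regardless of the configured tick rate: 5 jiffies at HZ = 100, 13 jiffies (52 ms) at HZ = 250, 50 jiffies at HZ = 1000.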
-
-static void vl15_watchdog_timeout(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
- if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
- ipath_dbg("vl15 watchdog timeout - clearing\n");
- ipath_cancel_sends(dd, 1);
- ipath_hol_down(dd);
- } else {
- ipath_dbg("vl15 watchdog timeout - "
- "condition already cleared\n");
- }
-}
-
-static void unmap_desc(struct ipath_devdata *dd, unsigned head)
-{
- __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
- u64 desc[2];
- dma_addr_t addr;
- size_t len;
-
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
-
- addr = (desc[1] << 32) | (desc[0] >> 32);
- len = (desc[0] >> 14) & (0x7ffULL << 2);
- dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
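The length decode above is the inverse of the packing done by make_sdma_desc() later in this file: the dword count sits in bits [26:16] of the first descriptor qword, so shifting right by 14 leaves it pre-multiplied by four (a byte count in bits [12:2]) and the mask 0x7ff << 2 strips the neighbouring fields. A descriptor built for 16 dwords therefore unmaps len = 64 bytes.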
-
-/*
- * ipath_sdma_lock should be locked before calling this.
- */
-int ipath_sdma_make_progress(struct ipath_devdata *dd)
-{
- struct list_head *lp = NULL;
- struct ipath_sdma_txreq *txp = NULL;
- u16 dmahead;
- u16 start_idx = 0;
- int progress = 0;
-
- if (!list_empty(&dd->ipath_sdma_activelist)) {
- lp = dd->ipath_sdma_activelist.next;
- txp = list_entry(lp, struct ipath_sdma_txreq, list);
- start_idx = txp->start_idx;
- }
-
- /*
- * Read the SDMA head register in order to know that the
- * interrupt clear has been written to the chip.
- * Otherwise, we may not get an interrupt for the last
- * descriptor in the queue.
- */
- dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
- /* sanity check return value for error handling (chip reset, etc.) */
- if (dmahead >= dd->ipath_sdma_descq_cnt)
- goto done;
-
- while (dd->ipath_sdma_descq_head != dmahead) {
- if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
- dd->ipath_sdma_descq_head == start_idx) {
- unmap_desc(dd, dd->ipath_sdma_descq_head);
- start_idx++;
- if (start_idx == dd->ipath_sdma_descq_cnt)
- start_idx = 0;
- }
-
- /* increment free count and head */
- dd->ipath_sdma_descq_removed++;
- if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
- dd->ipath_sdma_descq_head = 0;
-
- if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
- /* move to notify list */
- if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
- vl15_watchdog_deq(dd);
- list_move_tail(lp, &dd->ipath_sdma_notifylist);
- if (!list_empty(&dd->ipath_sdma_activelist)) {
- lp = dd->ipath_sdma_activelist.next;
- txp = list_entry(lp, struct ipath_sdma_txreq,
- list);
- start_idx = txp->start_idx;
- } else {
- lp = NULL;
- txp = NULL;
- }
- }
- progress = 1;
- }
-
- if (progress)
- tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-done:
- return progress;
-}
-
-static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
-{
- struct ipath_sdma_txreq *txp, *txp_next;
-
- list_for_each_entry_safe(txp, txp_next, list, list) {
- list_del_init(&txp->list);
-
- if (txp->callback)
- (*txp->callback)(txp->callback_cookie,
- txp->callback_status);
- }
-}
-
-static void sdma_notify_taskbody(struct ipath_devdata *dd)
-{
- unsigned long flags;
- struct list_head list;
-
- INIT_LIST_HEAD(&list);
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
- list_splice_init(&dd->ipath_sdma_notifylist, &list);
-
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- ipath_sdma_notify(dd, &list);
-
- /*
- * The IB verbs layer needs to see the callback before getting
- * the call to ipath_ib_piobufavail() because the callback
- * handles releasing resources the next send will need.
- * Otherwise, we could do these calls in
- * ipath_sdma_make_progress().
- */
- ipath_ib_piobufavail(dd->verbs_dev);
-}
-
-static void sdma_notify_task(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
- if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- sdma_notify_taskbody(dd);
-}
-
-static void dump_sdma_state(struct ipath_devdata *dd)
-{
- unsigned long reg;
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
- ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
- ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
- ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
- ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
- ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
- ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
-
- reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
- ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
-}
-
-static void sdma_abort_task(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
- u64 status;
- unsigned long flags;
-
- if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- return;
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
- status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
-
- /* nothing to do */
- if (status == IPATH_SDMA_ABORT_NONE)
- goto unlock;
-
- /* ipath_sdma_abort() is done, waiting for interrupt */
- if (status == IPATH_SDMA_ABORT_DISARMED) {
- if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
- goto resched_noprint;
- /* give up, intr got lost somewhere */
- ipath_dbg("give up waiting for SDMADISABLED intr\n");
- __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
- status = IPATH_SDMA_ABORT_ABORTED;
- }
-
- /* everything is stopped, time to clean up and restart */
- if (status == IPATH_SDMA_ABORT_ABORTED) {
- struct ipath_sdma_txreq *txp, *txpnext;
- u64 hwstatus;
- int notify = 0;
-
- hwstatus = ipath_read_kreg64(dd,
- dd->ipath_kregs->kr_senddmastatus);
-
- if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
- IPATH_SDMA_STATUS_ABORT_IN_PROG |
- IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
- !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
- if (dd->ipath_sdma_reset_wait > 0) {
- /* not done shutting down sdma */
- --dd->ipath_sdma_reset_wait;
- goto resched;
- }
- ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
- "status after SDMA reset, continuing\n");
- dump_sdma_state(dd);
- }
-
- /* dequeue all "sent" requests */
- list_for_each_entry_safe(txp, txpnext,
- &dd->ipath_sdma_activelist, list) {
- txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
- if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
- vl15_watchdog_deq(dd);
- list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
- notify = 1;
- }
- if (notify)
- tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
- /* reset our notion of head and tail */
- dd->ipath_sdma_descq_tail = 0;
- dd->ipath_sdma_descq_head = 0;
- dd->ipath_sdma_head_dma[0] = 0;
- dd->ipath_sdma_generation = 0;
- dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
-
- /* Reset SendDmaLenGen */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
- (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
-
- /* done with sdma state for a bit */
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- /*
- * Don't restart sdma here (with the exception
- * below). Wait until link is up to ACTIVE. VL15 MADs
- * used to bring the link up use PIO, and multiple link
- * transitions otherwise cause the sdma engine to be
- * stopped and started multiple times.
- * The disable is done here, including the shadow,
- * so the state is kept consistent.
- * See ipath_restart_sdma() for the actual starting
- * of sdma.
- */
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- /* make sure I see next message */
- dd->ipath_sdma_abort_jiffies = 0;
-
- /*
- * Not everything that takes SDMA offline is a link
- * status change. If the link was up, restart SDMA.
- */
- if (dd->ipath_flags & IPATH_LINKACTIVE)
- ipath_restart_sdma(dd);
-
- goto done;
- }
-
-resched:
- /*
- * for now, keep spinning
- * JAG - this is bad to just have default be a loop without
- * state change
- */
- if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
- ipath_dbg("looping with status 0x%08lx\n",
- dd->ipath_sdma_status);
- dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
- }
-resched_noprint:
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
- if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
- return;
-
-unlock:
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-done:
- return;
-}
-
-/*
- * This is called from interrupt context.
- */
-void ipath_sdma_intr(struct ipath_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
- (void) ipath_sdma_make_progress(dd);
-
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-}
-
-static int alloc_sdma(struct ipath_devdata *dd)
-{
- int ret = 0;
-
- /* Allocate memory for SendDMA descriptor FIFO */
- dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
- SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
-
- if (!dd->ipath_sdma_descq) {
- ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
- "FIFO memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
- dd->ipath_sdma_descq_cnt =
- SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
-
- /* Allocate memory for DMA of head register to memory */
- dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
- PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
- if (!dd->ipath_sdma_head_dma) {
- ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
- ret = -ENOMEM;
- goto cleanup_descq;
- }
- dd->ipath_sdma_head_dma[0] = 0;
-
- setup_timer(&dd->ipath_sdma_vl15_timer, vl15_watchdog_timeout,
- (unsigned long)dd);
-
- atomic_set(&dd->ipath_sdma_vl15_count, 0);
-
- goto done;
-
-cleanup_descq:
- dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
- (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
- dd->ipath_sdma_descq = NULL;
- dd->ipath_sdma_descq_phys = 0;
-done:
- return ret;
-}
-
-int setup_sdma(struct ipath_devdata *dd)
-{
- int ret = 0;
- unsigned i, n;
- u64 tmp64;
- u64 senddmabufmask[3] = { 0 };
- unsigned long flags;
-
- ret = alloc_sdma(dd);
- if (ret)
- goto done;
-
- if (!dd->ipath_sdma_descq) {
- ipath_dev_err(dd, "SendDMA memory not allocated\n");
- goto done;
- }
-
- /*
- * Set initial status as if we had been up, then gone down.
- * This lets initial start on transition to ACTIVE be the
- * same as restart after link flap.
- */
- dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
- dd->ipath_sdma_abort_jiffies = 0;
- dd->ipath_sdma_generation = 0;
- dd->ipath_sdma_descq_tail = 0;
- dd->ipath_sdma_descq_head = 0;
- dd->ipath_sdma_descq_removed = 0;
- dd->ipath_sdma_descq_added = 0;
-
- /* Set SendDmaBase */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
- dd->ipath_sdma_descq_phys);
- /* Set SendDmaLenGen */
- tmp64 = dd->ipath_sdma_descq_cnt;
- tmp64 |= 1<<18; /* enable generation checking */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
- /* Set SendDmaTail */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
- dd->ipath_sdma_descq_tail);
- /* Set SendDmaHeadAddr */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
- dd->ipath_sdma_head_phys);
-
- /*
- * Reserve all the former "kernel" piobufs, using high number range
- * so we get as many 4K buffers as possible
- */
- n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
- i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
- ipath_chg_pioavailkernel(dd, i, n - i , 0);
- for (; i < n; ++i) {
- unsigned word = i / 64;
- unsigned bit = i & 63;
- BUG_ON(word >= 3);
- senddmabufmask[word] |= 1ULL << bit;
- }
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
- senddmabufmask[0]);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
- senddmabufmask[1]);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
- senddmabufmask[2]);
-
- INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
- INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
-
- tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
- (unsigned long) dd);
- tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
- (unsigned long) dd);
-
- /*
-	 * No point turning SDMA on here, as the link is probably not ACTIVE.
- * Just mark it RUNNING and enable the interrupt, and let the
- * ipath_restart_sdma() on link transition to ACTIVE actually
- * enable it.
- */
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-done:
- return ret;
-}
-
-void teardown_sdma(struct ipath_devdata *dd)
-{
- struct ipath_sdma_txreq *txp, *txpnext;
- unsigned long flags;
- dma_addr_t sdma_head_phys = 0;
- dma_addr_t sdma_descq_phys = 0;
- void *sdma_descq = NULL;
- void *sdma_head_dma = NULL;
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
- __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
- __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- tasklet_kill(&dd->ipath_sdma_abort_task);
- tasklet_kill(&dd->ipath_sdma_notify_task);
-
- /* turn off sdma */
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- /* dequeue all "sent" requests */
- list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
- list) {
- txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
- if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
- vl15_watchdog_deq(dd);
- list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
- }
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- sdma_notify_taskbody(dd);
-
- del_timer_sync(&dd->ipath_sdma_vl15_timer);
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
- dd->ipath_sdma_abort_jiffies = 0;
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
-
- if (dd->ipath_sdma_head_dma) {
- sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
- sdma_head_phys = dd->ipath_sdma_head_phys;
- dd->ipath_sdma_head_dma = NULL;
- dd->ipath_sdma_head_phys = 0;
- }
-
- if (dd->ipath_sdma_descq) {
- sdma_descq = dd->ipath_sdma_descq;
- sdma_descq_phys = dd->ipath_sdma_descq_phys;
- dd->ipath_sdma_descq = NULL;
- dd->ipath_sdma_descq_phys = 0;
- }
-
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- if (sdma_head_dma)
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- sdma_head_dma, sdma_head_phys);
-
- if (sdma_descq)
- dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
- sdma_descq, sdma_descq_phys);
-}
-
-/*
- * [Re]start SDMA, if we use it, and it's not already OK.
- * This is called on transition to link ACTIVE, either the first or
- * subsequent times.
- */
-void ipath_restart_sdma(struct ipath_devdata *dd)
-{
- unsigned long flags;
- int needed = 1;
-
- if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
- goto bail;
-
- /*
-	 * First, make sure we actually should restart it: check
-	 * that we are "RUNNING" (not in teardown) and not
-	 * "SHUTDOWN".
- */
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
- || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
- needed = 0;
- else {
- __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
- __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
- __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
- }
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
- if (!needed) {
- ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
- dd->ipath_sdma_status);
- goto bail;
- }
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- /*
- * First clear, just to be safe. Enable is only done
- * in chip on 0->1 transition
- */
- dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
- /* notify upper layers */
- ipath_ib_piobufavail(dd->verbs_dev);
-
-bail:
- return;
-}
-
-static inline void make_sdma_desc(struct ipath_devdata *dd,
- u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
-{
- WARN_ON(addr & 3);
- /* SDmaPhyAddr[47:32] */
- sdmadesc[1] = addr >> 32;
- /* SDmaPhyAddr[31:0] */
- sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
- /* SDmaGeneration[1:0] */
- sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
- /* SDmaDwordCount[10:0] */
- sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
- /* SDmaBufOffset[12:2] */
- sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
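A worked example of the packing above, with illustrative values only: for a dword-aligned buffer at physical address 0x123456788 with generation 2, dwlen 16 and dwoffset 3, sdmadesc[1] becomes 0x1 (SDmaPhyAddr[47:32]) and sdmadesc[0] becomes 0x2345678880100003, i.e. the low 32 address bits in the upper half of the qword, the generation in bits [31:30], the dword count in bits [26:16] and the dword offset in bits [10:0]. The per-descriptor control bits (first/last descriptor, interrupt request, large-buffer select) are ORed in later by ipath_sdma_verbs_send() at bits 12, 11, 15 and 14 respectively.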
-
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- * the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- * (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int ipath_sdma_verbs_send(struct ipath_devdata *dd,
- struct ipath_sge_state *ss, u32 dwords,
- struct ipath_verbs_txreq *tx)
-{
-
- unsigned long flags;
- struct ipath_sge *sge;
- int ret = 0;
- u16 tail;
- __le64 *descqp;
- u64 sdmadesc[2];
- u32 dwoffset;
- dma_addr_t addr;
-
- if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
- ipath_dbg("packet size %X > ibmax %X, fail\n",
- tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
- ret = -EMSGSIZE;
- goto fail;
- }
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-retry:
- if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
- ret = -EBUSY;
- goto unlock;
- }
-
- if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
- if (ipath_sdma_make_progress(dd))
- goto retry;
- ret = -ENOBUFS;
- goto unlock;
- }
-
- addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
- tx->map_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, addr))
- goto ioerr;
-
- dwoffset = tx->map_len >> 2;
- make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
-
- /* SDmaFirstDesc */
- sdmadesc[0] |= 1ULL << 12;
- if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */
-
- /* write to the descq */
- tail = dd->ipath_sdma_descq_tail;
- descqp = &dd->ipath_sdma_descq[tail].qw[0];
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
- tx->txreq.start_idx = tail;
-
- /* increment the tail */
- if (++tail == dd->ipath_sdma_descq_cnt) {
- tail = 0;
- descqp = &dd->ipath_sdma_descq[0].qw[0];
- ++dd->ipath_sdma_generation;
- }
-
- sge = &ss->sge;
- while (dwords) {
- u32 dw;
- u32 len;
-
- len = dwords << 2;
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- dw = (len + 3) >> 2;
- addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, addr))
- goto unmap;
- make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
- /* SDmaUseLargeBuf has to be set in every descriptor */
- if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= 1ULL << 14;
- /* write to the descq */
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == dd->ipath_sdma_descq_cnt) {
- tail = 0;
- descqp = &dd->ipath_sdma_descq[0].qw[0];
- ++dd->ipath_sdma_generation;
- }
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
-
- dwoffset += dw;
- dwords -= dw;
- }
-
- if (!tail)
- descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
- descqp -= 2;
- /* SDmaLastDesc */
- descqp[0] |= cpu_to_le64(1ULL << 11);
- if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
- /* SDmaIntReq */
- descqp[0] |= cpu_to_le64(1ULL << 15);
- }
-
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-
- tx->txreq.next_descq_idx = tail;
- tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
- dd->ipath_sdma_descq_tail = tail;
- dd->ipath_sdma_descq_added += tx->txreq.sg_count;
- list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
- if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
- vl15_watchdog_enq(dd);
- goto unlock;
-
-unmap:
- while (tail != dd->ipath_sdma_descq_tail) {
- if (!tail)
- tail = dd->ipath_sdma_descq_cnt - 1;
- else
- tail--;
- unmap_desc(dd, tail);
- }
-ioerr:
- ret = -EIO;
-unlock:
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-fail:
- return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_srq.c b/drivers/staging/rdma/ipath/ipath_srq.c
deleted file mode 100644
index 26271984b717..000000000000
--- a/drivers/staging/rdma/ipath/ipath_srq.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct ipath_srq *srq = to_isrq(ibsrq);
- struct ipath_rwq *wq;
- unsigned long flags;
- int ret;
-
- for (; wr; wr = wr->next) {
- struct ipath_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > srq->rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&srq->rq.lock, flags);
- wq = srq->rq.wq;
- next = wq->head + 1;
- if (next >= srq->rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&srq->rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libipathverbs when creating a user SRQ
- */
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
-{
- struct ipath_ibdev *dev = to_idev(ibpd->device);
- struct ipath_srq *srq;
- u32 sz;
- struct ib_srq *ret;
-
- if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- ret = ERR_PTR(-ENOSYS);
- goto done;
- }
-
- if (srq_init_attr->attr.max_wr == 0) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
- (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- srq->rq.size = srq_init_attr->attr.max_wr + 1;
- srq->rq.max_sge = srq_init_attr->attr.max_sge;
- sz = sizeof(struct ib_sge) * srq->rq.max_sge +
- sizeof(struct ipath_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
- if (!srq->rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_srq;
- }
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See ipath_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
- u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
-
- srq->ip =
- ipath_create_mmap_info(dev, s,
- ibpd->uobject->context,
- srq->rq.wq);
- if (!srq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wq;
- }
-
- err = ib_copy_to_udata(udata, &srq->ip->offset,
- sizeof(srq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- srq->ip = NULL;
-
- /*
- * ib_create_srq() will initialize srq->ibsrq.
- */
- spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
- srq->limit = srq_init_attr->attr.srq_limit;
-
- spin_lock(&dev->n_srqs_lock);
- if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
- spin_unlock(&dev->n_srqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_srqs_allocated++;
- spin_unlock(&dev->n_srqs_lock);
-
- if (srq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &srq->ibsrq;
- goto done;
-
-bail_ip:
- kfree(srq->ip);
-bail_wq:
- vfree(srq->rq.wq);
-bail_srq:
- kfree(srq);
-done:
- return ret;
-}
-
-/**
- * ipath_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for ipathverbs.so
- */
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata)
-{
- struct ipath_srq *srq = to_isrq(ibsrq);
- struct ipath_rwq *wq;
- int ret = 0;
-
- if (attr_mask & IB_SRQ_MAX_WR) {
- struct ipath_rwq *owq;
- struct ipath_rwqe *p;
- u32 sz, size, n, head, tail;
-
- /* Check that the requested sizes are below the limits. */
- if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
- ((attr_mask & IB_SRQ_LIMIT) ?
- attr->srq_limit : srq->limit) > attr->max_wr) {
- ret = -EINVAL;
- goto bail;
- }
-
- sz = sizeof(struct ipath_rwqe) +
- srq->rq.max_sge * sizeof(struct ib_sge);
- size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
- if (!wq) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->inlen >= sizeof(__u64)) {
- __u64 offset_addr;
- __u64 offset = 0;
-
- ret = ib_copy_from_udata(&offset_addr, udata,
- sizeof(offset_addr));
- if (ret)
- goto bail_free;
- udata->outbuf =
- (void __user *) (unsigned long) offset_addr;
- ret = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&srq->rq.lock);
- /*
- * validate head pointer value and compute
- * the number of remaining WQEs.
- */
- owq = srq->rq.wq;
- head = owq->head;
- if (head >= srq->rq.size)
- head = 0;
- tail = owq->tail;
- if (tail >= srq->rq.size)
- tail = 0;
- n = head;
- if (n < tail)
- n += srq->rq.size - tail;
- else
- n -= tail;
- if (size <= n) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = 0;
- p = wq->wq;
- while (tail != head) {
- struct ipath_rwqe *wqe;
- int i;
-
- wqe = get_rwqe_ptr(&srq->rq, tail);
- p->wr_id = wqe->wr_id;
- p->num_sge = wqe->num_sge;
- for (i = 0; i < wqe->num_sge; i++)
- p->sg_list[i] = wqe->sg_list[i];
- n++;
- p = (struct ipath_rwqe *)((char *) p + sz);
- if (++tail >= srq->rq.size)
- tail = 0;
- }
- srq->rq.wq = wq;
- srq->rq.size = size;
- wq->head = n;
- wq->tail = 0;
- if (attr_mask & IB_SRQ_LIMIT)
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
-
- vfree(owq);
-
- if (srq->ip) {
- struct ipath_mmap_info *ip = srq->ip;
- struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
- u32 s = sizeof(struct ipath_rwq) + size * sz;
-
- ipath_update_mmap_info(dev, ip, s, wq);
-
- /*
- * Return the offset to mmap.
- * See ipath_mmap() for details.
- */
- if (udata && udata->inlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps,
- &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
- } else if (attr_mask & IB_SRQ_LIMIT) {
- spin_lock_irq(&srq->rq.lock);
- if (attr->srq_limit >= srq->rq.size)
- ret = -EINVAL;
- else
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
- }
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&srq->rq.lock);
-bail_free:
- vfree(wq);
-bail:
- return ret;
-}
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct ipath_srq *srq = to_isrq(ibsrq);
-
- attr->max_wr = srq->rq.size - 1;
- attr->max_sge = srq->rq.max_sge;
- attr->srq_limit = srq->limit;
- return 0;
-}
-
-/**
- * ipath_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int ipath_destroy_srq(struct ib_srq *ibsrq)
-{
- struct ipath_srq *srq = to_isrq(ibsrq);
- struct ipath_ibdev *dev = to_idev(ibsrq->device);
-
- spin_lock(&dev->n_srqs_lock);
- dev->n_srqs_allocated--;
- spin_unlock(&dev->n_srqs_lock);
- if (srq->ip)
- kref_put(&srq->ip->ref, ipath_release_mmap_info);
- else
- vfree(srq->rq.wq);
- kfree(srq);
-
- return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_stats.c b/drivers/staging/rdma/ipath/ipath_stats.c
deleted file mode 100644
index f63e143e3292..000000000000
--- a/drivers/staging/rdma/ipath/ipath_stats.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_kernel.h"
-
-struct infinipath_stats ipath_stats;
-
-/**
- * ipath_snap_cntr - snapshot a chip counter
- * @dd: the infinipath device
- * @creg: the counter to snapshot
- *
- * called from add_timer and user counter read calls, to deal with
- * counters that wrap in "human time". The words sent and received, and
- * the packets sent and received are all that we worry about. For now,
- * at least, we don't worry about error counters, because if they wrap
- * that quickly, we probably don't care. We may eventually just make this
- * handle all the counters. word counters can wrap in about 20 seconds
- * of full bandwidth traffic, packet counters in a few hours.
- */
-
-u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
-{
- u32 val, reg64 = 0;
- u64 val64;
- unsigned long t0, t1;
- u64 ret;
-
- t0 = jiffies;
- /* If fast increment counters are only 32 bits, snapshot them,
- * and maintain them as 64bit values in the driver */
- if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
- (creg == dd->ipath_cregs->cr_wordsendcnt ||
- creg == dd->ipath_cregs->cr_wordrcvcnt ||
- creg == dd->ipath_cregs->cr_pktsendcnt ||
- creg == dd->ipath_cregs->cr_pktrcvcnt)) {
- val64 = ipath_read_creg(dd, creg);
- val = val64 == ~0ULL ? ~0U : 0;
- reg64 = 1;
- } else /* val64 just to keep gcc quiet... */
- val64 = val = ipath_read_creg32(dd, creg);
- /*
- * See if a second has passed. This is just a way to detect things
- * that are quite broken. Normally this should take just a few
- * cycles (the check is for long enough that we don't care if we get
- * pre-empted.) An Opteron HT O read timeout is 4 seconds with
- * normal NB values
- */
- t1 = jiffies;
- if (time_before(t0 + HZ, t1) && val == -1) {
- ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
- creg);
- ret = 0ULL;
- goto bail;
- }
- if (reg64) {
- ret = val64;
- goto bail;
- }
-
- if (creg == dd->ipath_cregs->cr_wordsendcnt) {
- if (val != dd->ipath_lastsword) {
- dd->ipath_sword += val - dd->ipath_lastsword;
- dd->ipath_lastsword = val;
- }
- val64 = dd->ipath_sword;
- } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
- if (val != dd->ipath_lastrword) {
- dd->ipath_rword += val - dd->ipath_lastrword;
- dd->ipath_lastrword = val;
- }
- val64 = dd->ipath_rword;
- } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
- if (val != dd->ipath_lastspkts) {
- dd->ipath_spkts += val - dd->ipath_lastspkts;
- dd->ipath_lastspkts = val;
- }
- val64 = dd->ipath_spkts;
- } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
- if (val != dd->ipath_lastrpkts) {
- dd->ipath_rpkts += val - dd->ipath_lastrpkts;
- dd->ipath_lastrpkts = val;
- }
- val64 = dd->ipath_rpkts;
- } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) {
- if (dd->ibdeltainprog)
- val64 -= val64 - dd->ibsymsnap;
- val64 -= dd->ibsymdelta;
- } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) {
- if (dd->ibdeltainprog)
- val64 -= val64 - dd->iblnkerrsnap;
- val64 -= dd->iblnkerrdelta;
- } else
- val64 = (u64) val;
-
- ret = val64;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
- * @dd: the infinipath device
- *
- * print the delta of egrfull/hdrqfull errors for kernel ports no more than
- * every 5 seconds. User processes are printed at close, but kernel doesn't
- * close, so... Separate routine so it may be called from other places someday,
- * and so the function name is meaningful when printed by _IPATH_INFO
- */
-static void ipath_qcheck(struct ipath_devdata *dd)
-{
- static u64 last_tot_hdrqfull;
- struct ipath_portdata *pd = dd->ipath_pd[0];
- size_t blen = 0;
- char buf[128];
- u32 hdrqtail;
-
- *buf = 0;
- if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
- blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
- pd->port_hdrqfull -
- dd->ipath_p0_hdrqfull);
- dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
- }
- if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
- blen += snprintf(buf + blen, sizeof buf - blen,
- "%srcvegrfull %llu",
- blen ? ", " : "",
- (unsigned long long)
- (ipath_stats.sps_etidfull -
- dd->ipath_last_tidfull));
- dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
- }
-
- /*
- * this is actually the number of hdrq full interrupts, not actual
- * events, but at the moment that's mostly what I'm interested in.
- * Actual count, etc. is in the counters, if needed. For production
- * users this won't ordinarily be printed.
- */
-
- if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
- ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
- blen += snprintf(buf + blen, sizeof buf - blen,
- "%shdrqfull %llu (all ports)",
- blen ? ", " : "",
- (unsigned long long)
- (ipath_stats.sps_hdrqfull -
- last_tot_hdrqfull));
- last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
- }
- if (blen)
- ipath_dbg("%s\n", buf);
-
- hdrqtail = ipath_get_hdrqtail(pd);
- if (pd->port_head != hdrqtail) {
- if (dd->ipath_lastport0rcv_cnt ==
- ipath_stats.sps_port0pkts) {
- ipath_cdbg(PKT, "missing rcv interrupts? "
- "port0 hd=%x tl=%x; port0pkts %llx; write"
- " hd (w/intr)\n",
- pd->port_head, hdrqtail,
- (unsigned long long)
- ipath_stats.sps_port0pkts);
- ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
- dd->ipath_rhdrhead_intr_off, pd->port_port);
- }
- dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
- }
-}
-
-static void ipath_chk_errormask(struct ipath_devdata *dd)
-{
- static u32 fixed;
- u32 ctrl;
- unsigned long errormask;
- unsigned long hwerrs;
-
- if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
- return;
-
- errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-
- if (errormask == dd->ipath_errormask)
- return;
- fixed++;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- dd->ipath_errormask);
-
- if ((hwerrs & dd->ipath_hwerrmask) ||
- (ctrl & INFINIPATH_C_FREEZEMODE)) {
- /* force re-interrupt of pending events, just in case */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
- dev_info(&dd->pcidev->dev,
- "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
- fixed, errormask, (unsigned long)dd->ipath_errormask,
- ctrl, hwerrs);
- } else
- ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
- fixed, errormask,
- (unsigned long)dd->ipath_errormask);
-}
-
-
-/**
- * ipath_get_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the infinipath device ipath_devdata
- *
- * called from add_timer
- */
-void ipath_get_faststats(unsigned long opaque)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
- int i;
- static unsigned cnt;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
- ipath_diag_inuse)
-		/* but re-arm the timer for the diags case; won't hurt the others */
- goto done;
-
- /*
-	 * We now try to maintain an "active timer", based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
- spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
- traffic_wds -= dd->ipath_traffic_wds;
- dd->ipath_traffic_wds += traffic_wds;
- if (traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(5, &dd->ipath_active_time); /* S/B #define */
- spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-
- if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
- }
-
- ipath_qcheck(dd);
-
- /*
- * deal with repeat error suppression. Doesn't really matter if
- * last error was almost a full interval ago, or just a few usecs
- * ago; still won't get more than 2 per interval. We may want
- * longer intervals for this eventually, could do with mod, counter
- * or separate timer. Also see code in ipath_handle_errors() and
- * ipath_handle_hwerrors().
- */
-
- if (dd->ipath_lasterror)
- dd->ipath_lasterror = 0;
- if (dd->ipath_lasthwerror)
- dd->ipath_lasthwerror = 0;
- if (dd->ipath_maskederrs
- && time_after(jiffies, dd->ipath_unmasktime)) {
- char ebuf[256];
- int iserr;
- iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
- dd->ipath_maskederrs);
- if (dd->ipath_maskederrs &
- ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_PKTERRS))
- ipath_dev_err(dd, "Re-enabling masked errors "
- "(%s)\n", ebuf);
- else {
- /*
- * rcvegrfull and rcvhdrqfull are "normal", for some
- * types of processes (mostly benchmarks) that send
- * huge numbers of messages, while not processing
- * them. So only complain about these at debug
- * level.
- */
- if (iserr)
- ipath_dbg(
- "Re-enabling queue full errors (%s)\n",
- ebuf);
- else
- ipath_cdbg(ERRPKT, "Re-enabling packet"
- " problem interrupt (%s)\n", ebuf);
- }
-
- /* re-enable masked errors */
- dd->ipath_errormask |= dd->ipath_maskederrs;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
- dd->ipath_errormask);
- dd->ipath_maskederrs = 0;
- }
-
- /* limit qfull messages to ~one per minute per port */
- if ((++cnt & 0x10)) {
- for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
- struct ipath_portdata *pd = dd->ipath_pd[i];
-
- if (pd && pd->port_lastrcvhdrqtail != -1)
- pd->port_lastrcvhdrqtail = -1;
- }
- }
-
- ipath_chk_errormask(dd);
-done:
- mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_sysfs.c b/drivers/staging/rdma/ipath/ipath_sysfs.c
deleted file mode 100644
index b12b1f6caf59..000000000000
--- a/drivers/staging/rdma/ipath/ipath_sysfs.c
+++ /dev/null
@@ -1,1237 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/ctype.h>
-#include <linux/stat.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-/**
- * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
- * @str: the string containing the number
- * @valp: where to put the result
- *
- * returns the number of bytes consumed, or negative value on error
- */
-int ipath_parse_ushort(const char *str, unsigned short *valp)
-{
- unsigned long val;
- char *end;
- int ret;
-
- if (!isdigit(str[0])) {
- ret = -EINVAL;
- goto bail;
- }
-
- val = simple_strtoul(str, &end, 0);
-
- if (val > 0xffff) {
- ret = -EINVAL;
- goto bail;
- }
-
- *valp = val;
-
- ret = end + 1 - str;
- if (ret == 0)
- ret = -EINVAL;
-
-bail:
- return ret;
-}
-
-static ssize_t show_version(struct device_driver *dev, char *buf)
-{
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
-}
-
-static ssize_t show_num_units(struct device_driver *dev, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- ipath_count_units(NULL, NULL, NULL));
-}
-
-static ssize_t show_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- ssize_t ret;
-
- if (!dd->ipath_statusp) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long) *(dd->ipath_statusp));
-
-bail:
- return ret;
-}
-
-static const char *ipath_status_str[] = {
- "Initted",
- "Disabled",
- "Admin_Disabled",
- "", /* This used to be the old "OIB_SMA" status. */
- "", /* This used to be the old "SMA" status. */
- "Present",
- "IB_link_up",
- "IB_configured",
- "NoIBcable",
- "Fatal_Hardware_Error",
- NULL,
-};
-
-static ssize_t show_status_str(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int i, any;
- u64 s;
- ssize_t ret;
-
- if (!dd->ipath_statusp) {
- ret = -EINVAL;
- goto bail;
- }
-
- s = *(dd->ipath_statusp);
- *buf = '\0';
- for (any = i = 0; s && ipath_status_str[i]; i++) {
- if (s & 1) {
- if (any && strlcat(buf, " ", PAGE_SIZE) >=
- PAGE_SIZE)
- /* overflow */
- break;
- if (strlcat(buf, ipath_status_str[i],
- PAGE_SIZE) >= PAGE_SIZE)
- break;
- any = 1;
- }
- s >>= 1;
- }
- if (any)
- strlcat(buf, "\n", PAGE_SIZE);
-
- ret = strlen(buf);
-
-bail:
- return ret;
-}
-
-static ssize_t show_boardversion(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
-}
-
-static ssize_t show_localbus_info(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
-}
-
-static ssize_t show_lmc(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_lmc);
-}
-
-static ssize_t store_lmc(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- u16 lmc = 0;
- int ret;
-
- ret = ipath_parse_ushort(buf, &lmc);
- if (ret < 0)
- goto invalid;
-
- if (lmc > 7) {
- ret = -EINVAL;
- goto invalid;
- }
-
- ipath_set_lid(dd, dd->ipath_lid, lmc);
-
- goto bail;
-invalid:
- ipath_dev_err(dd, "attempt to set invalid LMC %u\n", lmc);
-bail:
- return ret;
-}
-
-static ssize_t show_lid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
-}
-
-static ssize_t store_lid(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- u16 lid = 0;
- int ret;
-
- ret = ipath_parse_ushort(buf, &lid);
- if (ret < 0)
- goto invalid;
-
- if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) {
- ret = -EINVAL;
- goto invalid;
- }
-
- ipath_set_lid(dd, lid, dd->ipath_lmc);
-
- goto bail;
-invalid:
- ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid);
-bail:
- return ret;
-}
-
-static ssize_t show_mlid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
-}
-
-static ssize_t store_mlid(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- u16 mlid;
- int ret;
-
- ret = ipath_parse_ushort(buf, &mlid);
- if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
- goto invalid;
-
- dd->ipath_mlid = mlid;
-
- goto bail;
-invalid:
- ipath_dev_err(dd, "attempt to set invalid MLID\n");
-bail:
- return ret;
-}
-
-static ssize_t show_guid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- u8 *guid;
-
- guid = (u8 *) & (dd->ipath_guid);
-
- return scnprintf(buf, PAGE_SIZE,
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- guid[0], guid[1], guid[2], guid[3],
- guid[4], guid[5], guid[6], guid[7]);
-}
-
-static ssize_t store_guid(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- ssize_t ret;
- unsigned short guid[8];
- __be64 new_guid;
- u8 *ng;
- int i;
-
- if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
- &guid[0], &guid[1], &guid[2], &guid[3],
- &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
- goto invalid;
-
- ng = (u8 *) &new_guid;
-
- for (i = 0; i < 8; i++) {
- if (guid[i] > 0xff)
- goto invalid;
- ng[i] = guid[i];
- }
-
- if (new_guid == 0)
- goto invalid;
-
- dd->ipath_guid = new_guid;
- dd->ipath_nguid = 1;
- if (dd->verbs_dev)
- dd->verbs_dev->ibdev.node_guid = new_guid;
-
- ret = strlen(buf);
- goto bail;
-
-invalid:
- ipath_dev_err(dd, "attempt to set invalid GUID\n");
- ret = -EINVAL;
-
-bail:
- return ret;
-}
-
-static ssize_t show_nguid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
-}
-
-static ssize_t show_nports(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- /* Return the number of user ports available. */
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1);
-}
-
-static ssize_t show_serial(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- buf[sizeof dd->ipath_serial] = '\0';
- memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
- strcat(buf, "\n");
- return strlen(buf);
-}
-
-static ssize_t show_unit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
-}
-
-static ssize_t show_jint_max_packets(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
-}
-
-static ssize_t store_jint_max_packets(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- u16 v = 0;
- int ret;
-
- ret = ipath_parse_ushort(buf, &v);
- if (ret < 0)
- ipath_dev_err(dd, "invalid jint_max_packets.\n");
- else
- dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
-
- return ret;
-}
-
-static ssize_t show_jint_idle_ticks(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
-}
-
-static ssize_t store_jint_idle_ticks(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- u16 v = 0;
- int ret;
-
- ret = ipath_parse_ushort(buf, &v);
- if (ret < 0)
- ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
- else
- dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
-
- return ret;
-}
-
-#define DEVICE_COUNTER(name, attr) \
- static ssize_t show_counter_##name(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
- { \
- struct ipath_devdata *dd = dev_get_drvdata(dev); \
- return scnprintf(\
- buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
- ipath_snap_cntr( \
- dd, offsetof(struct infinipath_counters, \
- attr) / sizeof(u64))); \
- } \
- static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
-
-DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
-DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
-DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
-DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
-DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
-DEVICE_COUNTER(lb_ints, LBIntCnt);
-DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
-DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
-DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
-DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
-DEVICE_COUNTER(rx_dwords, RxDwordCnt);
-DEVICE_COUNTER(rx_ebps, RxEBPCnt);
-DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
-DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
-DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
-DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
-DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
-DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
-DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
-DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
-DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
-DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
-DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
-DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
-DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
-DEVICE_COUNTER(tx_dwords, TxDwordCnt);
-DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
-DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
-DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
-DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
-DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
-DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
-
-static struct attribute *dev_counter_attributes[] = {
- &dev_attr_ib_link_downeds.attr,
- &dev_attr_ib_link_err_recoveries.attr,
- &dev_attr_ib_status_changes.attr,
- &dev_attr_ib_symbol_errs.attr,
- &dev_attr_lb_flow_stalls.attr,
- &dev_attr_lb_ints.attr,
- &dev_attr_rx_bad_formats.attr,
- &dev_attr_rx_buf_ovfls.attr,
- &dev_attr_rx_data_pkts.attr,
- &dev_attr_rx_dropped_pkts.attr,
- &dev_attr_rx_dwords.attr,
- &dev_attr_rx_ebps.attr,
- &dev_attr_rx_flow_ctrl_errs.attr,
- &dev_attr_rx_flow_pkts.attr,
- &dev_attr_rx_icrc_errs.attr,
- &dev_attr_rx_len_errs.attr,
- &dev_attr_rx_link_problems.attr,
- &dev_attr_rx_lpcrc_errs.attr,
- &dev_attr_rx_max_min_len_errs.attr,
- &dev_attr_rx_p0_hdr_egr_ovfls.attr,
- &dev_attr_rx_p1_hdr_egr_ovfls.attr,
- &dev_attr_rx_p2_hdr_egr_ovfls.attr,
- &dev_attr_rx_p3_hdr_egr_ovfls.attr,
- &dev_attr_rx_p4_hdr_egr_ovfls.attr,
- &dev_attr_rx_p5_hdr_egr_ovfls.attr,
- &dev_attr_rx_p6_hdr_egr_ovfls.attr,
- &dev_attr_rx_p7_hdr_egr_ovfls.attr,
- &dev_attr_rx_p8_hdr_egr_ovfls.attr,
- &dev_attr_rx_pkey_mismatches.attr,
- &dev_attr_rx_tid_full_errs.attr,
- &dev_attr_rx_tid_valid_errs.attr,
- &dev_attr_rx_vcrc_errs.attr,
- &dev_attr_tx_data_pkts.attr,
- &dev_attr_tx_dropped_pkts.attr,
- &dev_attr_tx_dwords.attr,
- &dev_attr_tx_flow_pkts.attr,
- &dev_attr_tx_flow_stalls.attr,
- &dev_attr_tx_len_errs.attr,
- &dev_attr_tx_max_min_len_errs.attr,
- &dev_attr_tx_underruns.attr,
- &dev_attr_tx_unsup_vl_errs.attr,
- NULL
-};
-
-static struct attribute_group dev_counter_attr_group = {
- .name = "counters",
- .attrs = dev_counter_attributes
-};
-
-static ssize_t store_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- if (count < 5 || memcmp(buf, "reset", 5)) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (dd->ipath_flags & IPATH_DISABLED) {
- /*
- * post-reset init would re-enable interrupts, etc.
-		 * so don't allow reset on disabled devices. Not a
-		 * perfect error code, but about the best choice.
- */
- dev_info(dev,"Unit %d is disabled, can't reset\n",
- dd->ipath_unit);
- ret = -EINVAL;
- goto bail;
- }
- ret = ipath_reset_device(dd->ipath_unit);
-bail:
- return ret<0 ? ret : count;
-}
-
-static ssize_t store_link_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 state;
-
- ret = ipath_parse_ushort(buf, &state);
- if (ret < 0)
- goto invalid;
-
- r = ipath_set_linkstate(dd, state);
- if (r < 0) {
- ret = r;
- goto bail;
- }
-
- goto bail;
-invalid:
- ipath_dev_err(dd, "attempt to set invalid link state\n");
-bail:
- return ret;
-}
-
-static ssize_t show_mtu(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
-}
-
-static ssize_t store_mtu(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- ssize_t ret;
- u16 mtu = 0;
- int r;
-
- ret = ipath_parse_ushort(buf, &mtu);
- if (ret < 0)
- goto invalid;
-
- r = ipath_set_mtu(dd, mtu);
- if (r < 0)
- ret = r;
-
- goto bail;
-invalid:
- ipath_dev_err(dd, "attempt to set invalid MTU\n");
-bail:
- return ret;
-}
-
-static ssize_t show_enabled(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
-}
-
-static ssize_t store_enabled(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- ssize_t ret;
- u16 enable = 0;
-
- ret = ipath_parse_ushort(buf, &enable);
- if (ret < 0) {
- ipath_dev_err(dd, "attempt to use non-numeric on enable\n");
- goto bail;
- }
-
- if (enable) {
- if (!(dd->ipath_flags & IPATH_DISABLED))
- goto bail;
-
- dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
- /* same as post-reset */
- ret = ipath_init_chip(dd, 1);
- if (ret)
- ipath_dev_err(dd, "Failed to enable unit %d\n",
- dd->ipath_unit);
- else {
- dd->ipath_flags &= ~IPATH_DISABLED;
- *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
- }
- } else if (!(dd->ipath_flags & IPATH_DISABLED)) {
- dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
- ipath_shutdown_device(dd);
- dd->ipath_flags |= IPATH_DISABLED;
- *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
- }
-
-bail:
- return ret;
-}
-
-static ssize_t store_rx_pol_inv(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret < 0)
- goto invalid;
-
- r = ipath_set_rx_pol_inv(dd, val);
- if (r < 0) {
- ret = r;
- goto bail;
- }
-
- goto bail;
-invalid:
- ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
-bail:
- return ret;
-}
-
-static ssize_t store_led_override(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret > 0)
- ipath_set_led_override(dd, val);
- else
- ipath_dev_err(dd, "attempt to set invalid LED override\n");
- return ret;
-}
-
-static ssize_t show_logged_errs(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int idx, count;
-
- /* force consistency with actual EEPROM */
- if (ipath_update_eeprom_log(dd) != 0)
- return -ENXIO;
-
- count = 0;
- for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
- dd->ipath_eep_st_errs[idx],
- idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
- }
-
- return count;
-}
-
-/*
- * New sysfs entries to control various IB config. These all turn into
- * accesses via ipath_f_get/set_ib_cfg.
- *
- * Get/Set heartbeat enable. Or of 1=enabled, 2=auto
- */
-static ssize_t show_hrtbt_enb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-static ssize_t store_hrtbt_enb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret >= 0 && val > 3)
- ret = -EINVAL;
- if (ret < 0) {
- ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
- goto bail;
- }
-
- /*
- * Set the "intentional" heartbeat enable per either of
- * "Enable" and "Auto", as these are normally set together.
- * This bit is consulted when leaving loopback mode,
- * because entering loopback mode overrides it and automatically
- * disables heartbeat.
- */
- r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
- if (r < 0)
- ret = r;
- else if (val == IPATH_IB_HRTBT_OFF)
- dd->ipath_flags |= IPATH_NO_HRTBT;
- else
- dd->ipath_flags &= ~IPATH_NO_HRTBT;
-
-bail:
- return ret;
-}
-
-/*
- * Get/Set Link-widths enabled. Or of 1=1x, 2=4x (this is human/IB centric,
- * _not_ the particular encoding of any given chip)
- */
-static ssize_t show_lwid_enb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-static ssize_t store_lwid_enb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret >= 0 && (val == 0 || val > 3))
- ret = -EINVAL;
- if (ret < 0) {
- ipath_dev_err(dd,
- "attempt to set invalid Link Width (enable)\n");
- goto bail;
- }
-
- r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
- if (r < 0)
- ret = r;
-
-bail:
- return ret;
-}
-
-/* Get current link width */
-static ssize_t show_lwid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-/*
- * Get/Set Link-speeds enabled. Or of 1=SDR 2=DDR.
- */
-static ssize_t show_spd_enb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-static ssize_t store_spd_enb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
- ret = -EINVAL;
- if (ret < 0) {
- ipath_dev_err(dd,
- "attempt to set invalid Link Speed (enable)\n");
- goto bail;
- }
-
- r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
- if (r < 0)
- ret = r;
-
-bail:
- return ret;
-}
-
-/* Get current link speed */
-static ssize_t show_spd(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-/*
- * Get/Set RX polarity-invert enable. 0=no, 1=yes.
- */
-static ssize_t show_rx_polinv_enb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-static ssize_t store_rx_polinv_enb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret >= 0 && val > 1) {
- ipath_dev_err(dd,
- "attempt to set invalid Rx Polarity (enable)\n");
- ret = -EINVAL;
- goto bail;
- }
-
- r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
- if (r < 0)
- ret = r;
-
-bail:
- return ret;
-}
-
-/*
- * Get/Set RX lane-reversal enable. 0=no, 1=yes.
- */
-static ssize_t show_lanerev_enb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
-
- ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
- if (ret >= 0)
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
- return ret;
-}
-
-static ssize_t store_lanerev_enb(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, r;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret >= 0 && val > 1) {
- ret = -EINVAL;
- ipath_dev_err(dd,
- "attempt to set invalid Lane reversal (enable)\n");
- goto bail;
- }
-
- r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
- if (r < 0)
- ret = r;
-
-bail:
- return ret;
-}
-
-static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
-static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
-
-static struct attribute *driver_attributes[] = {
- &driver_attr_num_units.attr,
- &driver_attr_version.attr,
- NULL
-};
-
-static struct attribute_group driver_attr_group = {
- .attrs = driver_attributes
-};
-
-static ssize_t store_tempsense(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret, stat;
- u16 val;
-
- ret = ipath_parse_ushort(buf, &val);
- if (ret <= 0) {
- ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
- goto bail;
- }
- /* If anything but the highest limit, enable T_CRIT_A "interrupt" */
- stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
- if (stat) {
- ipath_dev_err(dd, "Unable to set tempsense config\n");
- ret = -1;
- goto bail;
- }
- stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
- if (stat) {
- ipath_dev_err(dd, "Unable to set local Tcrit\n");
- ret = -1;
- goto bail;
- }
- stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
- if (stat) {
- ipath_dev_err(dd, "Unable to set remote Tcrit\n");
- ret = -1;
- goto bail;
- }
-
-bail:
- return ret;
-}
-
-/*
- * Dump tempsense registers in decimal, to ease shell scripting.
- */
-static ssize_t show_tempsense(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ipath_devdata *dd = dev_get_drvdata(dev);
- int ret;
- int idx;
- u8 regvals[8];
-
- ret = -ENXIO;
- for (idx = 0; idx < 8; ++idx) {
- if (idx == 6)
- continue;
- ret = ipath_tempsense_read(dd, idx);
- if (ret < 0)
- break;
- regvals[idx] = ret;
- }
- if (idx == 8)
- ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
- *(signed char *)(regvals),
- *(signed char *)(regvals + 1),
- regvals[2], regvals[3],
- *(signed char *)(regvals + 5),
- *(signed char *)(regvals + 7));
- return ret;
-}
-
-const struct attribute_group *ipath_driver_attr_groups[] = {
- &driver_attr_group,
- NULL,
-};
-
-static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
-static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
-static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
-static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
-static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
-static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
-static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
-static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
-static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
-static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
-static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
- show_jint_max_packets, store_jint_max_packets);
-static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
- show_jint_idle_ticks, store_jint_idle_ticks);
-static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
- show_tempsense, store_tempsense);
-
-static struct attribute *dev_attributes[] = {
- &dev_attr_guid.attr,
- &dev_attr_lmc.attr,
- &dev_attr_lid.attr,
- &dev_attr_link_state.attr,
- &dev_attr_mlid.attr,
- &dev_attr_mtu.attr,
- &dev_attr_nguid.attr,
- &dev_attr_nports.attr,
- &dev_attr_serial.attr,
- &dev_attr_status.attr,
- &dev_attr_status_str.attr,
- &dev_attr_boardversion.attr,
- &dev_attr_unit.attr,
- &dev_attr_enabled.attr,
- &dev_attr_rx_pol_inv.attr,
- &dev_attr_led_override.attr,
- &dev_attr_logged_errors.attr,
- &dev_attr_tempsense.attr,
- &dev_attr_localbus_info.attr,
- NULL
-};
-
-static struct attribute_group dev_attr_group = {
- .attrs = dev_attributes
-};
-
-static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
- store_hrtbt_enb);
-static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
- store_lwid_enb);
-static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
-static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
- store_spd_enb);
-static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
-static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
- store_rx_polinv_enb);
-static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
- store_lanerev_enb);
-
-static struct attribute *dev_ibcfg_attributes[] = {
- &dev_attr_hrtbt_enable.attr,
- &dev_attr_link_width_enable.attr,
- &dev_attr_link_width.attr,
- &dev_attr_link_speed_enable.attr,
- &dev_attr_link_speed.attr,
- &dev_attr_rx_pol_inv_enable.attr,
- &dev_attr_rx_lane_rev_enable.attr,
- NULL
-};
-
-static struct attribute_group dev_ibcfg_attr_group = {
- .attrs = dev_ibcfg_attributes
-};
-
-/**
- * ipath_expose_reset - create a device reset file
- * @dev: the device structure
- *
- * Only expose a file that lets us reset the device after someone
- * enters diag mode. A device reset is quite likely to crash the
- * machine entirely, so we don't normally want to make it
- * available.
- *
- * Called with ipath_mutex held.
- */
-int ipath_expose_reset(struct device *dev)
-{
- static int exposed;
- int ret;
-
- if (!exposed) {
- ret = device_create_file(dev, &dev_attr_reset);
- exposed = 1;
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
-{
- int ret;
-
- ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
- if (ret)
- goto bail;
-
- ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
- if (ret)
- goto bail_attrs;
-
- if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
- ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
- if (ret)
- goto bail_counter;
- ret = device_create_file(dev, &dev_attr_jint_max_packets);
- if (ret)
- goto bail_idle;
-
- ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
- if (ret)
- goto bail_max;
- }
-
- return 0;
-
-bail_max:
- device_remove_file(dev, &dev_attr_jint_max_packets);
-bail_idle:
- device_remove_file(dev, &dev_attr_jint_idle_ticks);
-bail_counter:
- sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-bail_attrs:
- sysfs_remove_group(&dev->kobj, &dev_attr_group);
-bail:
- return ret;
-}
-
-void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
-{
- sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-
- if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
- sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
- device_remove_file(dev, &dev_attr_jint_idle_ticks);
- device_remove_file(dev, &dev_attr_jint_max_packets);
- }
-
- sysfs_remove_group(&dev->kobj, &dev_attr_group);
-
- device_remove_file(dev, &dev_attr_reset);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
deleted file mode 100644
index 0246b30280b9..000000000000
--- a/drivers/staging/rdma/ipath/ipath_uc.c
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/**
- * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_uc_req(struct ipath_qp *qp)
-{
- struct ipath_other_headers *ohdr;
- struct ipath_swqe *wqe;
- unsigned long flags;
- u32 hwords;
- u32 bth0;
- u32 len;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
- int ret = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
- if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= IPATH_S_WAIT_DMA;
- goto bail;
- }
- wqe = get_swqe_ptr(qp, qp->s_last);
- ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- ohdr = &qp->s_hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr.u.l.oth;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 1 << 22; /* Set M bit */
-
- /* Get the next send request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
- qp->s_wqe = NULL;
- switch (qp->s_state) {
- default:
- if (!(ib_ipath_state_ops[qp->state] &
- IPATH_PROCESS_NEXT_SEND_OK))
- goto bail;
- /* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head)
- goto bail;
- /*
- * Start a new request.
- */
- qp->s_psn = wqe->psn = qp->s_next_psn;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_len = len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state =
- OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / 4;
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- break;
-
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state =
- OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
- ipath_make_ruc_header(to_idev(qp->ibqp.device),
- qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_next_psn++ & IPATH_PSN_MASK);
-done:
- ret = 1;
- goto unlock;
-
-bail:
- qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-/**
- * ipath_uc_rcv - handle an incoming UC packet
- * @dev: the device the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
- *
- * This is called from ipath_qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
- struct ipath_other_headers *ohdr;
- int opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
- struct ib_reth *reth;
- int header_in_data;
-
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
- goto done;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- psn = be32_to_cpu(ohdr->bth[2]);
- header_in_data = 0;
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- /*
- * The header with GRH is 60 bytes and the
- * core driver sets the eager header buffer
- * size to 56 bytes so the last 4 bytes of
-		 * the BTH header (the PSN) are in the data buffer.
- */
- header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
- if (header_in_data) {
- psn = be32_to_cpu(((__be32 *) data)[0]);
- data += sizeof(__be32);
- } else
- psn = be32_to_cpu(ohdr->bth[2]);
- }
- /*
-	 * The opcode is in the low byte when it's in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
- memset(&wc, 0, sizeof wc);
-
-	/* Compare the PSN versus the expected PSN. */
- if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
- /*
- * Handle a sequence error.
- * Silently drop any current message.
- */
- qp->r_psn = psn;
- inv:
- qp->r_state = OP(SEND_LAST);
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- goto send_first;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- goto rdma_first;
-
- default:
- dev->n_pkt_drops++;
- goto done;
- }
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- default:
- if (opcode == OP(SEND_FIRST) ||
- opcode == OP(SEND_ONLY) ||
- opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_FIRST) ||
- opcode == OP(RDMA_WRITE_ONLY) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- break;
- goto inv;
- }
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- send_first:
- if (qp->r_flags & IPATH_R_REUSE_SGE) {
- qp->r_flags &= ~IPATH_R_REUSE_SGE;
- qp->r_sge = qp->s_rdma_read_sge;
- } else if (!ipath_get_rwqe(qp, 0)) {
- dev->n_pkt_drops++;
- goto done;
- }
- /* Save the WQE so we can reuse it in case of an error. */
- qp->s_rdma_read_sge = qp->r_sge;
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto send_last;
- else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
- goto send_last_imm;
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4))) {
- qp->r_flags |= IPATH_R_REUSE_SGE;
- dev->n_pkt_drops++;
- goto done;
- }
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len)) {
- qp->r_flags |= IPATH_R_REUSE_SGE;
- dev->n_pkt_drops++;
- goto done;
- }
- ipath_copy_sge(&qp->r_sge, data, pmtu);
- break;
-
- case OP(SEND_LAST_WITH_IMMEDIATE):
- send_last_imm:
- if (header_in_data) {
- wc.ex.imm_data = *(__be32 *) data;
- data += sizeof(__be32);
- } else {
- /* Immediate data comes after BTH */
- wc.ex.imm_data = ohdr->u.imm_data;
- }
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- /* FALLTHROUGH */
- case OP(SEND_LAST):
- send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4))) {
- qp->r_flags |= IPATH_R_REUSE_SGE;
- dev->n_pkt_drops++;
- goto done;
- }
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= IPATH_R_REUSE_SGE;
- dev->n_pkt_drops++;
- goto done;
- }
- wc.opcode = IB_WC_RECV;
- last_imm:
- ipath_copy_sge(&qp->r_sge, data, tlen);
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- /* Signal completion event if the solicited bit is set. */
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(1 << 23)) != 0);
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
- rdma_first:
- /* RETH comes after BTH */
- if (!header_in_data)
- reth = &ohdr->u.rc.reth;
- else {
- reth = (struct ib_reth *)data;
- data += sizeof(*reth);
- }
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey */
- ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
- vaddr, rkey,
- IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok)) {
- dev->n_pkt_drops++;
- goto done;
- }
- } else {
- qp->r_sge.sg_list = NULL;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE))) {
- dev->n_pkt_drops++;
- goto done;
- }
- if (opcode == OP(RDMA_WRITE_ONLY))
- goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- goto rdma_last_imm;
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4))) {
- dev->n_pkt_drops++;
- goto done;
- }
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len)) {
- dev->n_pkt_drops++;
- goto done;
- }
- ipath_copy_sge(&qp->r_sge, data, pmtu);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- rdma_last_imm:
- if (header_in_data) {
- wc.ex.imm_data = *(__be32 *) data;
- data += sizeof(__be32);
- } else {
- /* Immediate data comes after BTH */
- wc.ex.imm_data = ohdr->u.imm_data;
- }
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
-
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4))) {
- dev->n_pkt_drops++;
- goto done;
- }
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
- dev->n_pkt_drops++;
- goto done;
- }
- if (qp->r_flags & IPATH_R_REUSE_SGE)
- qp->r_flags &= ~IPATH_R_REUSE_SGE;
- else if (!ipath_get_rwqe(qp, 1)) {
- dev->n_pkt_drops++;
- goto done;
- }
- wc.byte_len = qp->r_len;
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- goto last_imm;
-
- case OP(RDMA_WRITE_LAST):
- rdma_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4))) {
- dev->n_pkt_drops++;
- goto done;
- }
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
- dev->n_pkt_drops++;
- goto done;
- }
- ipath_copy_sge(&qp->r_sge, data, tlen);
- break;
-
- default:
- /* Drop packet for unknown opcodes. */
- dev->n_pkt_drops++;
- goto done;
- }
- qp->r_psn++;
- qp->r_state = opcode;
-done:
- return;
-}
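For reference: the receive path above repeatedly pulls two fields out of the first BTH dword (ohdr->bth[0]): the pad count used in the length checks and the solicited-event bit passed to ipath_cq_enter(). A minimal plain-C sketch of those extractions, assuming bth0 has already been converted to host byte order; the helper names are illustrative and not part of the driver:

	#include <stdint.h>

	/* Pad count: bytes of padding inserted before the 4-byte ICRC. */
	static inline uint32_t bth_pad_count(uint32_t bth0)
	{
		return (bth0 >> 20) & 3;
	}

	/* Solicited-event bit: set when the sender asked for a solicited completion. */
	static inline int bth_solicited(uint32_t bth0)
	{
		return (bth0 >> 23) & 1;
	}
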
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
deleted file mode 100644
index 385d9410a51e..000000000000
--- a/drivers/staging/rdma/ipath/ipath_ud.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from ipath_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling ipath_ud_rcv()
- * while this is being called.
- */
-static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
-{
- struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
- struct ipath_qp *qp;
- struct ib_ah_attr *ah_attr;
- unsigned long flags;
- struct ipath_rq *rq;
- struct ipath_srq *srq;
- struct ipath_sge_state rsge;
- struct ipath_sge *sge;
- struct ipath_rwq *wq;
- struct ipath_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- struct ib_wc wc;
- u32 tail;
- u32 rlen;
- u32 length;
-
- qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn);
- if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
- dev->n_pkt_drops++;
- goto done;
- }
-
- /*
- * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- if (unlikely(qp->ibqp.qp_num &&
- ((int) swqe->ud_wr.remote_qkey < 0 ?
- sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) {
- /* XXX OK to lose a count once in a while. */
- dev->qkey_violations++;
- dev->n_pkt_drops++;
- goto drop;
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- length = swqe->length;
- memset(&wc, 0, sizeof wc);
- wc.byte_len = length + sizeof(struct ib_grh);
-
- if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = swqe->wr.ex.imm_data;
- }
-
- /*
- * This would be a lot simpler if we could call ipath_get_rwqe()
- * but that uses state that the receive interrupt handler uses
- * so we would need to lock out receive interrupts while doing
- * local loopback.
- */
- if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- /*
- * Get the next work request entry to find where to put the data.
- * Note that it is safe to drop the lock after changing rq->tail
- * since ipath_post_receive() won't fill the empty slot.
- */
- spin_lock_irqsave(&rq->lock, flags);
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- spin_unlock_irqrestore(&rq->lock, flags);
- dev->n_pkt_drops++;
- goto drop;
- }
- wqe = get_rwqe_ptr(rq, tail);
- rsge.sg_list = qp->r_ud_sg_list;
- if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
- spin_unlock_irqrestore(&rq->lock, flags);
- dev->n_pkt_drops++;
- goto drop;
- }
- /* Silently drop packets which are too big. */
- if (wc.byte_len > rlen) {
- spin_unlock_irqrestore(&rq->lock, flags);
- dev->n_pkt_drops++;
- goto drop;
- }
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- wc.wr_id = wqe->wr_id;
- if (handler) {
- u32 n;
-
- /*
- * validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- } else
- spin_unlock_irqrestore(&rq->lock, flags);
- } else
- spin_unlock_irqrestore(&rq->lock, flags);
-
- ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
- if (ah_attr->ah_flags & IB_AH_GRH) {
- ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
- wc.wc_flags |= IB_WC_GRH;
- } else
- ipath_skip_sge(&rsge, sizeof(struct ib_grh));
- sge = swqe->sg_list;
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- ipath_copy_sge(&rsge, sge->vaddr, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--swqe->wr.num_sge)
- sge++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = sqp->ibqp.qp_num;
- /* XXX do we know which pkey matched? Only needed for GSI. */
- wc.pkey_index = 0;
- wc.slid = dev->dd->ipath_lid |
- (ah_attr->src_path_bits &
- ((1 << dev->dd->ipath_lmc) - 1));
- wc.sl = ah_attr->sl;
- wc.dlid_path_bits =
- ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED);
-drop:
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-done:;
-}
-
-/**
- * ipath_make_ud_req - construct a UD request packet
- * @qp: the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_ud_req(struct ipath_qp *qp)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ipath_other_headers *ohdr;
- struct ib_ah_attr *ah_attr;
- struct ipath_swqe *wqe;
- unsigned long flags;
- u32 nwords;
- u32 extra_bytes;
- u32 bth0;
- u16 lrh0;
- u16 lid;
- int ret = 0;
- int next_cur;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= IPATH_S_WAIT_DMA;
- goto bail;
- }
- wqe = get_swqe_ptr(qp, qp->s_last);
- ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- if (qp->s_cur == qp->s_head)
- goto bail;
-
- wqe = get_swqe_ptr(qp, qp->s_cur);
- next_cur = qp->s_cur + 1;
- if (next_cur >= qp->s_size)
- next_cur = 0;
-
- /* Construct the header. */
- ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
- if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
- dev->n_multicast_xmit++;
- else
- dev->n_unicast_xmit++;
- } else {
- dev->n_unicast_xmit++;
- lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
- if (unlikely(lid == dev->dd->ipath_lid)) {
- /*
- * If DMAs are in progress, we can't generate
- * a completion for the loopback packet since
- * it would be out of order.
- * XXX Instead of waiting, we could queue a
- * zero length descriptor so we get a callback.
- */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= IPATH_S_WAIT_DMA;
- goto bail;
- }
- qp->s_cur = next_cur;
- spin_unlock_irqrestore(&qp->s_lock, flags);
- ipath_ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, flags);
- ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
- goto done;
- }
- }
-
- qp->s_cur = next_cur;
- extra_bytes = -wqe->length & 3;
- nwords = (wqe->length + extra_bytes) >> 2;
-
- /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_dmult = ah_attr->static_rate;
- qp->s_wqe = wqe;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
-
- if (ah_attr->ah_flags & IB_AH_GRH) {
- /* Header size in 32-bit words. */
- qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
- &ah_attr->grh,
- qp->s_hdrwords, nwords);
- lrh0 = IPATH_LRH_GRH;
- ohdr = &qp->s_hdr.u.l.oth;
- /*
- * Don't worry about sending to locally attached multicast
- * QPs. It is unspecified by the spec. what happens.
- */
- } else {
- /* Header size in 32-bit words. */
- lrh0 = IPATH_LRH_BTH;
- ohdr = &qp->s_hdr.u.oth;
- }
- if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
- qp->s_hdrwords++;
- ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
- bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else
- bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- lrh0 |= ah_attr->sl << 4;
- if (qp->ibqp.qp_type == IB_QPT_SMI)
- lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
- qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
- SIZE_OF_CRC);
- lid = dev->dd->ipath_lid;
- if (lid) {
- lid |= ah_attr->src_path_bits &
- ((1 << dev->dd->ipath_lmc) - 1);
- qp->s_hdr.lrh[3] = cpu_to_be16(lid);
- } else
- qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
- if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= 1 << 23;
- bth0 |= extra_bytes << 20;
- bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
- ipath_get_pkey(dev->dd, qp->s_pkey_index);
- ohdr->bth[0] = cpu_to_be32(bth0);
- /*
- * Use the multicast QP if the destination LID is a multicast LID.
- */
- ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
- ah_attr->dlid != IPATH_PERMISSIVE_LID ?
- cpu_to_be32(IPATH_MULTICAST_QPN) :
- cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
- /*
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
- qp->qkey : wqe->ud_wr.remote_qkey);
- ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
- ret = 1;
- goto unlock;
-
-bail:
- qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-/**
- * ipath_ud_rcv - receive an incoming UD packet
- * @dev: the device the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
- struct ipath_other_headers *ohdr;
- int opcode;
- u32 hdrsize;
- u32 pad;
- struct ib_wc wc;
- u32 qkey;
- u32 src_qp;
- u16 dlid;
- int header_in_data;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
- header_in_data = 0;
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
- /*
- * The header with GRH is 68 bytes and the core driver sets
- * the eager header buffer size to 56 bytes so the last 12
- * bytes of the IB header is in the data buffer.
- */
- header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
- if (header_in_data) {
- qkey = be32_to_cpu(((__be32 *) data)[1]);
- src_qp = be32_to_cpu(((__be32 *) data)[2]);
- data += 12;
- } else {
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
- }
- }
- src_qp &= IPATH_QPN_MASK;
-
- /*
- * Check that the permissive LID is only used on QP0
- * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
- */
- if (qp->ibqp.qp_num) {
- if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE)) {
- dev->n_pkt_drops++;
- goto bail;
- }
- if (unlikely(qkey != qp->qkey)) {
- /* XXX OK to lose a count once in a while. */
- dev->qkey_violations++;
- dev->n_pkt_drops++;
- goto bail;
- }
- } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE) {
- struct ib_smp *smp = (struct ib_smp *) data;
-
- if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- dev->n_pkt_drops++;
- goto bail;
- }
- }
-
- /*
-	 * The opcode is in the low byte when it's in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (qp->ibqp.qp_num > 1 &&
- opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- if (header_in_data) {
- wc.ex.imm_data = *(__be32 *) data;
- data += sizeof(__be32);
- } else
- wc.ex.imm_data = ohdr->u.ud.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- hdrsize += sizeof(u32);
- } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
- } else {
- dev->n_pkt_drops++;
- goto bail;
- }
-
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4))) {
- /* Drop incomplete packets. */
- dev->n_pkt_drops++;
- goto bail;
- }
- tlen -= hdrsize + pad + 4;
-
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (unlikely((qp->ibqp.qp_num == 0 &&
- (tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
- (qp->ibqp.qp_num == 1 &&
- (tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
- dev->n_pkt_drops++;
- goto bail;
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- wc.byte_len = tlen + sizeof(struct ib_grh);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & IPATH_R_REUSE_SGE)
- qp->r_flags &= ~IPATH_R_REUSE_SGE;
- else if (!ipath_get_rwqe(qp, 0)) {
- /*
- * Count VL15 packets dropped due to no receive buffer.
- * Otherwise, count them as buffer overruns since usually,
- * the HW will be able to receive packets even if there are
- * no QPs with posted receive buffers.
- */
- if (qp->ibqp.qp_num == 0)
- dev->n_vl15_dropped++;
- else
- dev->rcv_errors++;
- goto bail;
- }
- /* Silently drop packets which are too big. */
- if (wc.byte_len > qp->r_len) {
- qp->r_flags |= IPATH_R_REUSE_SGE;
- dev->n_pkt_drops++;
- goto bail;
- }
- if (has_grh) {
- ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh));
- wc.wc_flags |= IB_WC_GRH;
- } else
- ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
- ipath_copy_sge(&qp->r_sge, data,
- wc.byte_len - sizeof(struct ib_grh));
- if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
- goto bail;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.qp = &qp->ibqp;
- wc.src_qp = src_qp;
- /* XXX do we know which pkey matched? Only needed for GSI. */
- wc.pkey_index = 0;
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
- dlid = be16_to_cpu(hdr->lrh[1]);
- /*
- * Save the LMC lower bits if the destination LID is a unicast LID.
- */
- wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
- dlid & ((1 << dev->dd->ipath_lmc) - 1);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(1 << 23)) != 0);
-
-bail:;
-}
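For reference: ipath_ud_rcv() above validates the length before touching the payload. The total length must cover the headers, the pad bytes and the 4-byte ICRC, and the completion length always accounts for a GRH even when none was present on the wire. A hedged restatement of that check; the function and constant names are illustrative, not from the removed driver:

	#include <stdint.h>

	#define IB_GRH_BYTES 40	/* size of struct ib_grh */

	/* Returns 0 and fills *byte_len on success, -1 for an incomplete packet. */
	static int ud_completion_len(uint32_t tlen, uint32_t hdrsize, uint32_t pad,
				     uint32_t *byte_len)
	{
		if (tlen < hdrsize + pad + 4)		/* drop incomplete packets */
			return -1;
		tlen -= hdrsize + pad + 4;		/* don't count the ICRC */
		*byte_len = tlen + IB_GRH_BYTES;	/* GRH space is always counted */
		return 0;
	}
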
diff --git a/drivers/staging/rdma/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
deleted file mode 100644
index d29b4daf61f8..000000000000
--- a/drivers/staging/rdma/ipath/ipath_user_pages.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-static void __ipath_release_user_pages(struct page **p, size_t num_pages,
- int dirty)
-{
- size_t i;
-
- for (i = 0; i < num_pages; i++) {
- ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
- (unsigned long) num_pages, p[i]);
- if (dirty)
- set_page_dirty_lock(p[i]);
- put_page(p[i]);
- }
-}
-
-/* call with current->mm->mmap_sem held */
-static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- unsigned long lock_limit;
- size_t got;
- int ret;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (num_pages > lock_limit) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
- (unsigned long) num_pages, start_page);
-
- for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(current, current->mm,
- start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
- p + got, NULL);
- if (ret < 0)
- goto bail_release;
- }
-
- current->mm->pinned_vm += num_pages;
-
- ret = 0;
- goto bail;
-
-bail_release:
- __ipath_release_user_pages(p, got, 0);
-bail:
- return ret;
-}
-
-/**
- * ipath_map_page - a safety wrapper around pci_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other iommu's, so FIXME.
- */
-dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- dma_addr_t phys;
-
- phys = pci_map_page(hwdev, page, offset, size, direction);
-
- if (phys == 0) {
- pci_unmap_page(hwdev, phys, size, direction);
- phys = pci_map_page(hwdev, page, offset, size, direction);
- /*
- * FIXME: If we get 0 again, we should keep this page,
- * map another, then free the 0 page.
- */
- }
-
- return phys;
-}
-
-/**
- * ipath_map_single - a safety wrapper around pci_map_single()
- *
- * Same idea as ipath_map_page().
- */
-dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
- int direction)
-{
- dma_addr_t phys;
-
- phys = pci_map_single(hwdev, ptr, size, direction);
-
- if (phys == 0) {
- pci_unmap_single(hwdev, phys, size, direction);
- phys = pci_map_single(hwdev, ptr, size, direction);
- /*
- * FIXME: If we get 0 again, we should keep this page,
- * map another, then free the 0 page.
- */
- }
-
- return phys;
-}
-
-/**
- * ipath_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages. For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- int ret;
-
- down_write(&current->mm->mmap_sem);
-
- ret = __ipath_get_user_pages(start_page, num_pages, p);
-
- up_write(&current->mm->mmap_sem);
-
- return ret;
-}
-
-void ipath_release_user_pages(struct page **p, size_t num_pages)
-{
- down_write(&current->mm->mmap_sem);
-
- __ipath_release_user_pages(p, num_pages, 1);
-
- current->mm->pinned_vm -= num_pages;
-
- up_write(&current->mm->mmap_sem);
-}
-
-struct ipath_user_pages_work {
- struct work_struct work;
- struct mm_struct *mm;
- unsigned long num_pages;
-};
-
-static void user_pages_account(struct work_struct *_work)
-{
- struct ipath_user_pages_work *work =
- container_of(_work, struct ipath_user_pages_work, work);
-
- down_write(&work->mm->mmap_sem);
- work->mm->pinned_vm -= work->num_pages;
- up_write(&work->mm->mmap_sem);
- mmput(work->mm);
- kfree(work);
-}
-
-void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
-{
- struct ipath_user_pages_work *work;
- struct mm_struct *mm;
-
- __ipath_release_user_pages(p, num_pages, 1);
-
- mm = get_task_mm(current);
- if (!mm)
- return;
-
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- goto bail_mm;
-
- INIT_WORK(&work->work, user_pages_account);
- work->mm = mm;
- work->num_pages = num_pages;
-
- queue_work(ib_wq, &work->work);
- return;
-
-bail_mm:
- mmput(mm);
- return;
-}
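For reference: the pinning helpers above check the request against RLIMIT_MEMLOCK before calling get_user_pages() and account the result in mm->pinned_vm. A minimal sketch of the limit check, mirroring __ipath_get_user_pages(); kernel-style pseudocode shown only to make the accounting explicit:

	/* Refuse to pin more pages than the per-process RLIMIT_MEMLOCK allows. */
	static int check_memlock_limit(size_t num_pages)
	{
		unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

		if (num_pages > lock_limit)
			return -ENOMEM;
		return 0;
	}
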
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.c b/drivers/staging/rdma/ipath/ipath_user_sdma.c
deleted file mode 100644
index 8c12e3cccc58..000000000000
--- a/drivers/staging/rdma/ipath/ipath_user_sdma.c
+++ /dev/null
@@ -1,874 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_user_sdma.h"
-
-/* minimum size of header */
-#define IPATH_USER_SDMA_MIN_HEADER_LENGTH 64
-/* expected size of headers (for dma_pool) */
-#define IPATH_USER_SDMA_EXP_HEADER_LENGTH 64
-/* length mask in PBC (lower 11 bits) */
-#define IPATH_PBC_LENGTH_MASK ((1 << 11) - 1)
-
-struct ipath_user_sdma_pkt {
- u8 naddr; /* dimension of addr (1..3) ... */
- u32 counter; /* sdma pkts queued counter for this entry */
- u64 added; /* global descq number of entries */
-
- struct {
- u32 offset; /* offset for kvaddr, addr */
- u32 length; /* length in page */
- u8 put_page; /* should we put_page? */
- u8 dma_mapped; /* is page dma_mapped? */
- struct page *page; /* may be NULL (coherent mem) */
- void *kvaddr; /* FIXME: only for pio hack */
- dma_addr_t addr;
- } addr[4]; /* max pages, any more and we coalesce */
- struct list_head list; /* list element */
-};
-
-struct ipath_user_sdma_queue {
- /*
- * pkts sent to dma engine are queued on this
- * list head. the type of the elements of this
- * list are struct ipath_user_sdma_pkt...
- */
- struct list_head sent;
-
- /* headers with expected length are allocated from here... */
- char header_cache_name[64];
- struct dma_pool *header_cache;
-
- /* packets are allocated from the slab cache... */
- char pkt_slab_name[64];
- struct kmem_cache *pkt_slab;
-
- /* as packets go on the queued queue, they are counted... */
- u32 counter;
- u32 sent_counter;
-
- /* dma page table */
- struct rb_root dma_pages_root;
-
- /* protect everything above... */
- struct mutex lock;
-};
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
-{
- struct ipath_user_sdma_queue *pq =
- kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
-
- if (!pq)
- goto done;
-
- pq->counter = 0;
- pq->sent_counter = 0;
- INIT_LIST_HEAD(&pq->sent);
-
- mutex_init(&pq->lock);
-
- snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
- "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
- pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
- sizeof(struct ipath_user_sdma_pkt),
- 0, 0, NULL);
-
- if (!pq->pkt_slab)
- goto err_kfree;
-
- snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
- "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
- pq->header_cache = dma_pool_create(pq->header_cache_name,
- dev,
- IPATH_USER_SDMA_EXP_HEADER_LENGTH,
- 4, 0);
- if (!pq->header_cache)
- goto err_slab;
-
- pq->dma_pages_root = RB_ROOT;
-
- goto done;
-
-err_slab:
- kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
- kfree(pq);
- pq = NULL;
-
-done:
- return pq;
-}
-
-static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
- int i, size_t offset, size_t len,
- int put_page, int dma_mapped,
- struct page *page,
- void *kvaddr, dma_addr_t dma_addr)
-{
- pkt->addr[i].offset = offset;
- pkt->addr[i].length = len;
- pkt->addr[i].put_page = put_page;
- pkt->addr[i].dma_mapped = dma_mapped;
- pkt->addr[i].page = page;
- pkt->addr[i].kvaddr = kvaddr;
- pkt->addr[i].addr = dma_addr;
-}
-
-static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
- u32 counter, size_t offset,
- size_t len, int dma_mapped,
- struct page *page,
- void *kvaddr, dma_addr_t dma_addr)
-{
- pkt->naddr = 1;
- pkt->counter = counter;
- ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
- kvaddr, dma_addr);
-}
-
-/* we've too many pages in the iovec, coalesce to a single page */
-static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
- struct ipath_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov) {
- int ret = 0;
- struct page *page = alloc_page(GFP_KERNEL);
- void *mpage_save;
- char *mpage;
- int i;
- int len = 0;
- dma_addr_t dma_addr;
-
- if (!page) {
- ret = -ENOMEM;
- goto done;
- }
-
- mpage = kmap(page);
- mpage_save = mpage;
- for (i = 0; i < niov; i++) {
- int cfur;
-
- cfur = copy_from_user(mpage,
- iov[i].iov_base, iov[i].iov_len);
- if (cfur) {
- ret = -EFAULT;
- goto free_unmap;
- }
-
- mpage += iov[i].iov_len;
- len += iov[i].iov_len;
- }
-
- dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- ret = -ENOMEM;
- goto free_unmap;
- }
-
- ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
- dma_addr);
- pkt->naddr = 2;
-
- goto done;
-
-free_unmap:
- kunmap(page);
- __free_page(page);
-done:
- return ret;
-}
-
-/* how many pages in this iovec element? */
-static int ipath_user_sdma_num_pages(const struct iovec *iov)
-{
- const unsigned long addr = (unsigned long) iov->iov_base;
- const unsigned long len = iov->iov_len;
- const unsigned long spage = addr & PAGE_MASK;
- const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
- return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
-
-/* truncate length to page boundary */
-static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
-{
- const unsigned long offset = offset_in_page(addr);
-
- return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
-}
-
-static void ipath_user_sdma_free_pkt_frag(struct device *dev,
- struct ipath_user_sdma_queue *pq,
- struct ipath_user_sdma_pkt *pkt,
- int frag)
-{
- const int i = frag;
-
- if (pkt->addr[i].page) {
- if (pkt->addr[i].dma_mapped)
- dma_unmap_page(dev,
- pkt->addr[i].addr,
- pkt->addr[i].length,
- DMA_TO_DEVICE);
-
- if (pkt->addr[i].kvaddr)
- kunmap(pkt->addr[i].page);
-
- if (pkt->addr[i].put_page)
- put_page(pkt->addr[i].page);
- else
- __free_page(pkt->addr[i].page);
- } else if (pkt->addr[i].kvaddr)
- /* free coherent mem from cache... */
- dma_pool_free(pq->header_cache,
- pkt->addr[i].kvaddr, pkt->addr[i].addr);
-}
-
-/* return number of pages pinned... */
-static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
- struct ipath_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, int npages)
-{
- struct page *pages[2];
- int j;
- int ret;
-
- ret = get_user_pages_fast(addr, npages, 0, pages);
- if (ret != npages) {
- int i;
-
- for (i = 0; i < ret; i++)
- put_page(pages[i]);
-
- ret = -ENOMEM;
- goto done;
- }
-
- for (j = 0; j < npages; j++) {
- /* map the pages... */
- const int flen =
- ipath_user_sdma_page_length(addr, tlen);
- dma_addr_t dma_addr =
- dma_map_page(&dd->pcidev->dev,
- pages[j], 0, flen, DMA_TO_DEVICE);
- unsigned long fofs = offset_in_page(addr);
-
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- ret = -ENOMEM;
- goto done;
- }
-
- ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
- pages[j], kmap(pages[j]),
- dma_addr);
-
- pkt->naddr++;
- addr += flen;
- tlen -= flen;
- }
-
-done:
- return ret;
-}
-
-static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- struct ipath_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- unsigned long idx;
-
- for (idx = 0; idx < niov; idx++) {
- const int npages = ipath_user_sdma_num_pages(iov + idx);
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
- ret = ipath_user_sdma_pin_pages(dd, pkt,
- addr, iov[idx].iov_len,
- npages);
- if (ret < 0)
- goto free_pkt;
- }
-
- goto done;
-
-free_pkt:
- for (idx = 0; idx < pkt->naddr; idx++)
- ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
-done:
- return ret;
-}
-
-static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- struct ipath_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov, int npages)
-{
- int ret = 0;
-
- if (npages >= ARRAY_SIZE(pkt->addr))
- ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
- else
- ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
- return ret;
-}
-
-/* free a packet list -- return counter value of last packet */
-static void ipath_user_sdma_free_pkt_list(struct device *dev,
- struct ipath_user_sdma_queue *pq,
- struct list_head *list)
-{
- struct ipath_user_sdma_pkt *pkt, *pkt_next;
-
- list_for_each_entry_safe(pkt, pkt_next, list, list) {
- int i;
-
- for (i = 0; i < pkt->naddr; i++)
- ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
- kmem_cache_free(pq->pkt_slab, pkt);
- }
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * we queue all the packets to list, returning the
- * number of bytes total. list must be empty initially,
- * as, if there is an error we clean it...
- */
-static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- struct list_head *list,
- const struct iovec *iov,
- unsigned long niov,
- int maxpkts)
-{
- unsigned long idx = 0;
- int ret = 0;
- int npkts = 0;
- struct page *page = NULL;
- __le32 *pbc;
- dma_addr_t dma_addr;
- struct ipath_user_sdma_pkt *pkt = NULL;
- size_t len;
- size_t nw;
- u32 counter = pq->counter;
- int dma_mapped = 0;
-
- while (idx < niov && npkts < maxpkts) {
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
- const unsigned long idx_save = idx;
- unsigned pktnw;
- unsigned pktnwc;
- int nfrags = 0;
- int npages = 0;
- int cfur;
-
- dma_mapped = 0;
- len = iov[idx].iov_len;
- nw = len >> 2;
- page = NULL;
-
- pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_list;
- }
-
- if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
- len > PAGE_SIZE || len & 3 || addr & 3) {
- ret = -EINVAL;
- goto free_pkt;
- }
-
- if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
- pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
- &dma_addr);
- else
- pbc = NULL;
-
- if (!pbc) {
- page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto free_pkt;
- }
- pbc = kmap(page);
- }
-
- cfur = copy_from_user(pbc, iov[idx].iov_base, len);
- if (cfur) {
- ret = -EFAULT;
- goto free_pbc;
- }
-
- /*
-		 * this assignment is a bit strange. it's because the
-		 * pbc counts the number of 32 bit words in the full
- * packet _except_ the first word of the pbc itself...
- */
- pktnwc = nw - 1;
-
- /*
- * pktnw computation yields the number of 32 bit words
- * that the caller has indicated in the PBC. note that
- * this is one less than the total number of words that
- * goes to the send DMA engine as the first 32 bit word
- * of the PBC itself is not counted. Armed with this count,
- * we can verify that the packet is consistent with the
- * iovec lengths.
- */
- pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
- if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
-
- idx++;
- while (pktnwc < pktnw && idx < niov) {
- const size_t slen = iov[idx].iov_len;
- const unsigned long faddr =
- (unsigned long) iov[idx].iov_base;
-
- if (slen & 3 || faddr & 3 || !slen ||
- slen > PAGE_SIZE) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- npages++;
- if ((faddr & PAGE_MASK) !=
- ((faddr + slen - 1) & PAGE_MASK))
- npages++;
-
- pktnwc += slen >> 2;
- idx++;
- nfrags++;
- }
-
- if (pktnwc != pktnw) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- if (page) {
- dma_addr = dma_map_page(&dd->pcidev->dev,
- page, 0, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- ret = -ENOMEM;
- goto free_pbc;
- }
-
- dma_mapped = 1;
- }
-
- ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
- page, pbc, dma_addr);
-
- if (nfrags) {
- ret = ipath_user_sdma_init_payload(dd, pq, pkt,
- iov + idx_save + 1,
- nfrags, npages);
- if (ret < 0)
- goto free_pbc_dma;
- }
-
- counter++;
- npkts++;
-
- list_add_tail(&pkt->list, list);
- }
-
- ret = idx;
- goto done;
-
-free_pbc_dma:
- if (dma_mapped)
- dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
-free_pbc:
- if (page) {
- kunmap(page);
- __free_page(page);
- } else
- dma_pool_free(pq->header_cache, pbc, dma_addr);
-free_pkt:
- kmem_cache_free(pq->pkt_slab, pkt);
-free_list:
- ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
- return ret;
-}
-
-static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
- u32 c)
-{
- pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq)
-{
- struct list_head free_list;
- struct ipath_user_sdma_pkt *pkt;
- struct ipath_user_sdma_pkt *pkt_prev;
- int ret = 0;
-
- INIT_LIST_HEAD(&free_list);
-
- list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
- s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
-
- if (descd < 0)
- break;
-
- list_move_tail(&pkt->list, &free_list);
-
- /* one more packet cleaned */
- ret++;
- }
-
- if (!list_empty(&free_list)) {
- u32 counter;
-
- pkt = list_entry(free_list.prev,
- struct ipath_user_sdma_pkt, list);
- counter = pkt->counter;
-
- ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- ipath_user_sdma_set_complete_counter(pq, counter);
- }
-
- return ret;
-}
-
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
-{
- if (!pq)
- return;
-
- kmem_cache_destroy(pq->pkt_slab);
- dma_pool_destroy(pq->header_cache);
- kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
- ret = ipath_sdma_make_progress(dd);
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- return ret;
-}
-
-/* we're in close, drain packets so that we can cleanup successfully... */
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq)
-{
- int i;
-
- if (!pq)
- return;
-
- for (i = 0; i < 100; i++) {
- mutex_lock(&pq->lock);
- if (list_empty(&pq->sent)) {
- mutex_unlock(&pq->lock);
- break;
- }
- ipath_user_sdma_hwqueue_clean(dd);
- ipath_user_sdma_queue_clean(dd, pq);
- mutex_unlock(&pq->lock);
- msleep(10);
- }
-
- if (!list_empty(&pq->sent)) {
- struct list_head free_list;
-
- printk(KERN_INFO "drain: lists not empty: forcing!\n");
- INIT_LIST_HEAD(&free_list);
- mutex_lock(&pq->lock);
- list_splice_init(&pq->sent, &free_list);
- ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- mutex_unlock(&pq->lock);
- }
-}
-
-static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
- u64 addr, u64 dwlen, u64 dwoffset)
-{
- return cpu_to_le64(/* SDmaPhyAddr[31:0] */
- ((addr & 0xfffffffcULL) << 32) |
- /* SDmaGeneration[1:0] */
- ((dd->ipath_sdma_generation & 3ULL) << 30) |
- /* SDmaDwordCount[10:0] */
- ((dwlen & 0x7ffULL) << 16) |
- /* SDmaBufOffset[12:2] */
- (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
-{
- return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
-{
- /* last */ /* dma head */
- return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 ipath_sdma_make_desc1(u64 addr)
-{
- /* SDmaPhyAddr[47:32] */
- return cpu_to_le64(addr >> 32);
-}
-
-static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
- struct ipath_user_sdma_pkt *pkt, int idx,
- unsigned ofs, u16 tail)
-{
- const u64 addr = (u64) pkt->addr[idx].addr +
- (u64) pkt->addr[idx].offset;
- const u64 dwlen = (u64) pkt->addr[idx].length / 4;
- __le64 *descqp;
- __le64 descq0;
-
- descqp = &dd->ipath_sdma_descq[tail].qw[0];
-
- descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
- if (idx == 0)
- descq0 = ipath_sdma_make_first_desc0(descq0);
- if (idx == pkt->naddr - 1)
- descq0 = ipath_sdma_make_last_desc0(descq0);
-
- descqp[0] = descq0;
- descqp[1] = ipath_sdma_make_desc1(addr);
-}
-
-/* pq->lock must be held, get packets on the wire... */
-static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- struct list_head *pktlist)
-{
- int ret = 0;
- unsigned long flags;
- u16 tail;
-
- if (list_empty(pktlist))
- return 0;
-
- if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
- return -ECOMM;
-
- spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
- if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
- ret = -ECOMM;
- goto unlock;
- }
-
- tail = dd->ipath_sdma_descq_tail;
- while (!list_empty(pktlist)) {
- struct ipath_user_sdma_pkt *pkt =
- list_entry(pktlist->next, struct ipath_user_sdma_pkt,
- list);
- int i;
- unsigned ofs = 0;
- u16 dtail = tail;
-
- if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
- goto unlock_check_tail;
-
- for (i = 0; i < pkt->naddr; i++) {
- ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
- ofs += pkt->addr[i].length >> 2;
-
- if (++tail == dd->ipath_sdma_descq_cnt) {
- tail = 0;
- ++dd->ipath_sdma_generation;
- }
- }
-
- if ((ofs<<2) > dd->ipath_ibmaxlen) {
- ipath_dbg("packet size %X > ibmax %X, fail\n",
- ofs<<2, dd->ipath_ibmaxlen);
- ret = -EMSGSIZE;
- goto unlock;
- }
-
- /*
- * if the packet is >= 2KB mtu equivalent, we have to use
- * the large buffers, and have to mark each descriptor as
- * part of a large buffer packet.
- */
- if (ofs >= IPATH_SMALLBUF_DWORDS) {
- for (i = 0; i < pkt->naddr; i++) {
- dd->ipath_sdma_descq[dtail].qw[0] |=
- cpu_to_le64(1ULL << 14);
- if (++dtail == dd->ipath_sdma_descq_cnt)
- dtail = 0;
- }
- }
-
- dd->ipath_sdma_descq_added += pkt->naddr;
- pkt->added = dd->ipath_sdma_descq_added;
- list_move_tail(&pkt->list, &pq->sent);
- ret++;
- }
-
-unlock_check_tail:
- /* advance the tail on the chip if necessary */
- if (dd->ipath_sdma_descq_tail != tail) {
- wmb();
- ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
- dd->ipath_sdma_descq_tail = tail;
- }
-
-unlock:
- spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
- return ret;
-}
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim)
-{
- int ret = 0;
- struct list_head list;
- int npkts = 0;
-
- INIT_LIST_HEAD(&list);
-
- mutex_lock(&pq->lock);
-
- if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
- ipath_user_sdma_hwqueue_clean(dd);
- ipath_user_sdma_queue_clean(dd, pq);
- }
-
- while (dim) {
- const int mxp = 8;
-
- ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
- if (ret <= 0)
- goto done_unlock;
- else {
- dim -= ret;
- iov += ret;
- }
-
- /* force packets onto the sdma hw queue... */
- if (!list_empty(&list)) {
- /*
- * lazily clean hw queue. the 4 is a guess of about
- * how many sdma descriptors a packet will take (it
- * doesn't have to be perfect).
- */
- if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
- ipath_user_sdma_hwqueue_clean(dd);
- ipath_user_sdma_queue_clean(dd, pq);
- }
-
- ret = ipath_user_sdma_push_pkts(dd, pq, &list);
- if (ret < 0)
- goto done_unlock;
- else {
- npkts += ret;
- pq->counter += ret;
-
- if (!list_empty(&list))
- goto done_unlock;
- }
- }
- }
-
-done_unlock:
- if (!list_empty(&list))
- ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
- mutex_unlock(&pq->lock);
-
- return (ret < 0) ? ret : npkts;
-}
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq)
-{
- int ret = 0;
-
- mutex_lock(&pq->lock);
- ipath_user_sdma_hwqueue_clean(dd);
- ret = ipath_user_sdma_queue_clean(dd, pq);
- mutex_unlock(&pq->lock);
-
- return ret;
-}
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
-{
- return pq->sent_counter;
-}
-
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
-{
- return pq->counter;
-}
-
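For reference: each SDMA fragment above is encoded as two little-endian qwords. The first qword packs the low 32 address bits, the ring generation, the dword count and the buffer offset, and ipath_user_sdma_send_frag() ORs in the first/last/head flags at bits 12, 11 and 13. A hedged plain-C restatement of the bit layout (the driver itself builds __le64 values with cpu_to_le64):

	#include <stdint.h>

	static uint64_t sdma_desc0(uint64_t addr, uint64_t gen,
				   uint64_t dwlen, uint64_t dwoffset)
	{
		return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0]    */
		       ((gen & 0x3ULL) << 30) |		/* SDmaGeneration[1:0]  */
		       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
		       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2]  */
	}

	#define SDMA_DESC_LAST		(1ULL << 11)	/* last descriptor of a packet  */
	#define SDMA_DESC_FIRST		(1ULL << 12)	/* first descriptor of a packet */
	#define SDMA_DESC_DMA_HEAD	(1ULL << 13)	/* update the DMA head pointer  */
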
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.h b/drivers/staging/rdma/ipath/ipath_user_sdma.h
deleted file mode 100644
index fc76316c4a58..000000000000
--- a/drivers/staging/rdma/ipath/ipath_user_sdma.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct ipath_user_sdma_queue;
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim);
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq);
-
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
- struct ipath_user_sdma_queue *pq);
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
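For reference: the header above is the whole per-port user SDMA API. Judging from the removed implementation, the intended call order is create at open, writev() per submission, progress and drain on poll and close, then destroy. A hedged usage sketch with error handling elided; the variable names are illustrative:

	struct ipath_user_sdma_queue *pq;

	pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, unit, port, subport);
	if (!pq)
		return -ENOMEM;

	ret = ipath_user_sdma_writev(dd, pq, iov, dim);	/* queue and push packets   */
	ipath_user_sdma_make_progress(dd, pq);		/* reap completed packets   */

	ipath_user_sdma_queue_drain(dd, pq);		/* at close: flush the ring */
	ipath_user_sdma_queue_destroy(dd, pq);
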
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
deleted file mode 100644
index 53f9dcab180d..000000000000
--- a/drivers/staging/rdma/ipath/ipath_verbs.c
+++ /dev/null
@@ -1,2376 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-static unsigned int ib_ipath_qp_table_size = 251;
-module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-unsigned int ib_ipath_lkey_table_size = 12;
-module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
- S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
- "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_ipath_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_pds,
- "Maximum number of protection domains to support");
-
-static unsigned int ib_ipath_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_ipath_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
- "Maximum number of completion queue entries to support");
-
-unsigned int ib_ipath_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_ipath_max_qps = 16384;
-module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_ipath_max_sges = 0x60;
-module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_ipath_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
- "Maximum number of multicast groups to support");
-
-unsigned int ib_ipath_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
- uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
- "Maximum number of attached QPs to support");
-
-unsigned int ib_ipath_max_srqs = 1024;
-module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_ipath_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
- uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
- uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
-
-static unsigned int ib_ipath_disable_sma;
-module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; ipath_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = 0,
- [IB_QPS_INIT] = IPATH_POST_RECV_OK,
- [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
- [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
- IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
- IPATH_PROCESS_NEXT_SEND_OK,
- [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
- IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
- [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
- IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
- [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
- IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-};
-
-struct ipath_ucontext {
- struct ib_ucontext ibucontext;
-};
-
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-static __be64 sys_image_guid;
-
-/**
- * ipath_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
-{
- struct ipath_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- memcpy(sge->vaddr, data, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-}
-
-/**
- * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
- * @ss: the SGE state
- * @length: the number of bytes to skip
- */
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
-{
- struct ipath_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
-}
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the ipath_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
-{
- struct ipath_sge *sg_list = ss->sg_list;
- struct ipath_sge sge = ss->sge;
- u8 num_sge = ss->num_sge;
- u32 ndesc = 1; /* count the header */
-
- while (length) {
- u32 len = sge.length;
-
- if (len > length)
- len = length;
- if (len > sge.sge_length)
- len = sge.sge_length;
- BUG_ON(len == 0);
- if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
- (len != length && (len & (sizeof(u32) - 1)))) {
- ndesc = 0;
- break;
- }
- ndesc++;
- sge.vaddr += len;
- sge.length -= len;
- sge.sge_length -= len;
- if (sge.sge_length == 0) {
- if (--num_sge)
- sge = *sg_list++;
- } else if (sge.length == 0 && sge.mr != NULL) {
- if (++sge.n >= IPATH_SEGSZ) {
- if (++sge.m >= sge.mr->mapsz)
- break;
- sge.n = 0;
- }
- sge.vaddr =
- sge.mr->map[sge.m]->segs[sge.n].vaddr;
- sge.length =
- sge.mr->map[sge.m]->segs[sge.n].length;
- }
- length -= len;
- }
- return ndesc;
-}
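/*
 * Illustrative sketch (not part of the original file): the check in
 * ipath_count_sge() above accepts a payload for send DMA only when each
 * segment starts on a 32-bit boundary and every segment except the last
 * is a whole number of 32-bit words; otherwise it returns 0 and the
 * caller falls back to copying into a bounce buffer.  The helper name
 * below is hypothetical.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seg_dma_ok(const void *vaddr, uint32_t len, bool is_last)
{
	if ((uintptr_t)vaddr & (sizeof(uint32_t) - 1))
		return false;				/* start address not dword aligned */
	if (!is_last && (len & (sizeof(uint32_t) - 1)))
		return false;				/* partial trailing word mid-payload */
	return true;
}

int main(void)
{
	static uint32_t buf[4];

	assert(seg_dma_ok(buf, 12, false));			/* aligned, whole dwords */
	assert(!seg_dma_ok((uint8_t *)buf + 1, 12, false));	/* misaligned start */
	assert(seg_dma_ok(buf, 5, true));			/* odd length OK only at the end */
	return 0;
}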
-
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
- u32 length)
-{
- struct ipath_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- memcpy(data, sge->vaddr, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-}
-
-/**
- * ipath_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
-{
- struct ipath_swqe *wqe;
- u32 next;
- int i;
- int j;
- int acc;
- int ret;
- unsigned long flags;
- struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (qp->ibqp.qp_type != IB_QPT_SMI &&
- !(dd->ipath_flags & IPATH_LINKACTIVE)) {
- ret = -ENETDOWN;
- goto bail;
- }
-
- /* Check that state is OK to post send. */
- if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
- goto bail_inval;
-
- /* IB spec says that num_sge == 0 is OK. */
- if (wr->num_sge > qp->s_max_sge)
- goto bail_inval;
-
- /*
- * Don't allow RDMA reads or atomic operations on UC QPs, and
- * reject undefined opcodes.
- * Make sure the buffer is large enough to hold the result for atomics.
- */
- if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
- goto bail_inval;
- } else if (qp->ibqp.qp_type == IB_QPT_UD) {
- /* Check UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- goto bail_inval;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
- goto bail_inval;
- } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
- goto bail_inval;
- else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1)))
- goto bail_inval;
- else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
- goto bail_inval;
-
- next = qp->s_head + 1;
- if (next >= qp->s_size)
- next = 0;
- if (next == qp->s_last) {
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_swqe_ptr(qp, qp->s_head);
-
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
- memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
- else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE ||
- wr->opcode == IB_WR_RDMA_READ)
- memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
- else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
- else
- memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
- wqe->length = 0;
- if (wr->num_sge) {
- acc = wr->opcode >= IB_WR_RDMA_READ ?
- IB_ACCESS_LOCAL_WRITE : 0;
- for (i = 0, j = 0; i < wr->num_sge; i++) {
- u32 length = wr->sg_list[i].length;
- int ok;
-
- if (length == 0)
- continue;
- ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
- &wr->sg_list[i], acc);
- if (!ok)
- goto bail_inval;
- wqe->length += length;
- j++;
- }
- wqe->wr.num_sge = j;
- }
- if (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) {
- if (wqe->length > 0x80000000U)
- goto bail_inval;
- } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
- goto bail_inval;
- wqe->ssn = qp->s_ssn++;
- qp->s_head = next;
-
- ret = 0;
- goto bail;
-
-bail_inval:
- ret = -EINVAL;
-bail:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-/**
- * ipath_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- struct ipath_qp *qp = to_iqp(ibqp);
- int err = 0;
-
- for (; wr; wr = wr->next) {
- err = ipath_post_one_send(qp, wr);
- if (err) {
- *bad_wr = wr;
- goto bail;
- }
- }
-
- /* Try to do the send work in the caller's context. */
- ipath_do_send((unsigned long) qp);
-
-bail:
- return err;
-}
-
-/**
- * ipath_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct ipath_qp *qp = to_iqp(ibqp);
- struct ipath_rwq *wq = qp->r_rq.wq;
- unsigned long flags;
- int ret;
-
- /* Check that state is OK to post receive. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- for (; wr; wr = wr->next) {
- struct ipath_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- next = wq->head + 1;
- if (next >= qp->r_rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_qp_rcv - process an incoming packet on a QP
- * @dev: the device the packet came on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void ipath_qp_rcv(struct ipath_ibdev *dev,
- struct ipath_ib_header *hdr, int has_grh,
- void *data, u32 tlen, struct ipath_qp *qp)
-{
- /* Check for valid receive state. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
- dev->n_pkt_drops++;
- return;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (ib_ipath_disable_sma)
- break;
- /* FALLTHROUGH */
- case IB_QPT_UD:
- ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_RC:
- ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_UC:
- ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
- break;
-
- default:
- break;
- }
-}
-
-/**
- * ipath_ib_rcv - process an incoming packet
- * @dev: the device pointer
- * @rhdr: the header of the packet
- * @data: the packet data
- * @tlen: the packet length
- *
- * This is called from ipath_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
- u32 tlen)
-{
- struct ipath_ib_header *hdr = rhdr;
- struct ipath_other_headers *ohdr;
- struct ipath_qp *qp;
- u32 qp_num;
- int lnh;
- u8 opcode;
- u16 lid;
-
- if (unlikely(dev == NULL))
- goto bail;
-
- if (unlikely(tlen < 24)) { /* LRH+BTH+CRC */
- dev->rcv_errors++;
- goto bail;
- }
-
- /* Check for a valid destination LID (see ch. 7.11.1). */
- lid = be16_to_cpu(hdr->lrh[1]);
- if (lid < IPATH_MULTICAST_LID_BASE) {
- lid &= ~((1 << dev->dd->ipath_lmc) - 1);
- if (unlikely(lid != dev->dd->ipath_lid)) {
- dev->rcv_errors++;
- goto bail;
- }
- }
-
- /* Check for GRH */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == IPATH_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == IPATH_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else {
- dev->rcv_errors++;
- goto bail;
- }
-
- opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
- dev->opstats[opcode].n_bytes += tlen;
- dev->opstats[opcode].n_packets++;
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
- if (qp_num == IPATH_MULTICAST_QPN) {
- struct ipath_mcast *mcast;
- struct ipath_mcast_qp *p;
-
- if (lnh != IPATH_LRH_GRH) {
- dev->n_pkt_drops++;
- goto bail;
- }
- mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
- if (mcast == NULL) {
- dev->n_pkt_drops++;
- goto bail;
- }
- dev->n_multicast_rcv++;
- list_for_each_entry_rcu(p, &mcast->qp_list, list)
- ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
- /*
- * Notify ipath_multicast_detach() if it is waiting for us
- * to finish.
- */
- if (atomic_dec_return(&mcast->refcount) <= 1)
- wake_up(&mcast->wait);
- } else {
- qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
- if (qp) {
- dev->n_unicast_rcv++;
- ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
- tlen, qp);
- /*
- * Notify ipath_destroy_qp() if it is waiting
- * for us to finish.
- */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- } else
- dev->n_pkt_drops++;
- }
-
-bail:;
-}
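/*
 * Illustrative sketch (not part of the original file): for a packet with
 * no GRH, the receive path above requires at least LRH (8 bytes) +
 * BTH (12 bytes) + ICRC (4 bytes) = 24 bytes, then takes the opcode from
 * the top byte of the first BTH dword and the 24-bit destination QPN from
 * the second BTH dword.  The helper below redoes that arithmetic on a raw
 * buffer; its name and the test values are hypothetical, and ntohl()
 * stands in for be32_to_cpu().
 */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int parse_bth(const uint8_t *pkt, uint32_t tlen, uint8_t *opcode, uint32_t *qpn)
{
	uint32_t bth0, bth1;

	if (tlen < 8 + 12 + 4)			/* LRH + BTH + ICRC */
		return -1;
	memcpy(&bth0, pkt + 8, sizeof(bth0));	/* BTH follows the 8-byte LRH */
	memcpy(&bth1, pkt + 12, sizeof(bth1));
	*opcode = (ntohl(bth0) >> 24) & 0x7f;	/* same mask as the opstats index */
	*qpn = ntohl(bth1) & 0xffffff;		/* destination QPNs are 24 bits */
	return 0;
}

int main(void)
{
	uint8_t pkt[24] = { 0 };
	uint8_t opcode;
	uint32_t qpn;

	pkt[8] = 0x64;				/* hypothetical opcode byte */
	pkt[15] = 0x01;				/* destination QPN = 1 */
	assert(parse_bth(pkt, sizeof(pkt), &opcode, &qpn) == 0);
	assert(opcode == 0x64 && qpn == 1);
	return 0;
}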
-
-/**
- * ipath_ib_timer - verbs timer
- * @dev: the device pointer
- *
- * This is called from ipath_do_rcv_timer() at interrupt level to check for
- * QPs which need retransmits and to collect performance numbers.
- */
-static void ipath_ib_timer(struct ipath_ibdev *dev)
-{
- struct ipath_qp *resend = NULL;
- struct ipath_qp *rnr = NULL;
- struct list_head *last;
- struct ipath_qp *qp;
- unsigned long flags;
-
- if (dev == NULL)
- return;
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- /* Start filling the next pending queue. */
- if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
- dev->pending_index = 0;
- /* Save any requests still in the new queue, they have timed out. */
- last = &dev->pending[dev->pending_index];
- while (!list_empty(last)) {
- qp = list_entry(last->next, struct ipath_qp, timerwait);
- list_del_init(&qp->timerwait);
- qp->timer_next = resend;
- resend = qp;
- atomic_inc(&qp->refcount);
- }
- last = &dev->rnrwait;
- if (!list_empty(last)) {
- qp = list_entry(last->next, struct ipath_qp, timerwait);
- if (--qp->s_rnr_timeout == 0) {
- do {
- list_del_init(&qp->timerwait);
- qp->timer_next = rnr;
- rnr = qp;
- atomic_inc(&qp->refcount);
- if (list_empty(last))
- break;
- qp = list_entry(last->next, struct ipath_qp,
- timerwait);
- } while (qp->s_rnr_timeout == 0);
- }
- }
- /*
- * We should only be in the started state if pma_sample_start != 0
- */
- if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
- --dev->pma_sample_start == 0) {
- dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
- &dev->ipath_rword,
- &dev->ipath_spkts,
- &dev->ipath_rpkts,
- &dev->ipath_xmit_wait);
- }
- if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
- if (dev->pma_sample_interval == 0) {
- u64 ta, tb, tc, td, te;
-
- dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- ipath_snapshot_counters(dev->dd, &ta, &tb,
- &tc, &td, &te);
-
- dev->ipath_sword = ta - dev->ipath_sword;
- dev->ipath_rword = tb - dev->ipath_rword;
- dev->ipath_spkts = tc - dev->ipath_spkts;
- dev->ipath_rpkts = td - dev->ipath_rpkts;
- dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
- } else {
- dev->pma_sample_interval--;
- }
- }
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- /* XXX What if timer fires again while this is running? */
- while (resend != NULL) {
- qp = resend;
- resend = qp->timer_next;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_last != qp->s_tail &&
- ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
- dev->n_timeouts++;
- ipath_restart_rc(qp, qp->s_last_psn + 1);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Notify ipath_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
- while (rnr != NULL) {
- qp = rnr;
- rnr = qp->timer_next;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
- ipath_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Notify ipath_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
- struct ipath_sge *sge = &ss->sge;
-
- sge->vaddr += length;
- sge->length -= length;
- sge->sge_length -= length;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- return;
- sge->n = 0;
- }
- sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
- }
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#endif
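/*
 * Illustrative sketch (not part of the original file): on a little-endian
 * CPU, clear_upper_bytes(data, n, off) above keeps the n low-order bytes
 * of a word and shifts them up to byte offset off so they can be OR-ed
 * into a partially filled output word, e.g.
 * clear_upper_bytes(0xaabbccdd, 2, 1) == 0x00ccdd00.  The standalone
 * check below reproduces the little-endian arithmetic.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t le_clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
	data <<= (sizeof(uint32_t) - n) * 8;		/* drop the unused high bytes */
	data >>= (sizeof(uint32_t) - n - off) * 8;	/* land the kept bytes at offset off */
	return data;
}

int main(void)
{
	assert(le_clear_upper_bytes(0xaabbccdd, 2, 1) == 0x00ccdd00);
	assert(le_clear_upper_bytes(0xaabbccdd, 2, 0) == 0x0000ccdd);
	return 0;
}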
-
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
- u32 length, unsigned flush_wc)
-{
- u32 extra = 0;
- u32 data = 0;
- u32 last;
-
- while (1) {
- u32 len = ss->sge.length;
- u32 off;
-
- if (len > length)
- len = length;
- if (len > ss->sge.sge_length)
- len = ss->sge.sge_length;
- BUG_ON(len == 0);
- /* If the source address is not aligned, try to align it. */
- off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
- if (off) {
- u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
- ~(sizeof(u32) - 1));
- u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
- u32 y;
-
- y = sizeof(u32) - off;
- if (len > y)
- len = y;
- if (len + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, extra *
- BITS_PER_BYTE);
- len = sizeof(u32) - extra;
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, len, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += len;
- }
- } else if (extra) {
- /* Source address is aligned. */
- u32 *addr = (u32 *) ss->sge.vaddr;
- int shift = extra * BITS_PER_BYTE;
- int ushift = 32 - shift;
- u32 l = len;
-
- while (l >= sizeof(u32)) {
- u32 v = *addr;
-
- data |= set_upper_bits(v, shift);
- __raw_writel(data, piobuf);
- data = get_upper_bits(v, ushift);
- piobuf++;
- addr++;
- l -= sizeof(u32);
- }
- /*
- * We still have 'extra' number of bytes leftover.
- */
- if (l) {
- u32 v = *addr;
-
- if (l + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, shift);
- len -= l + extra - sizeof(u32);
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, l,
- extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += l;
- }
- } else if (len == length) {
- last = data;
- break;
- }
- } else if (len == length) {
- u32 w;
-
- /*
- * Need to round up for the last dword in the
- * packet.
- */
- w = (len + 3) >> 2;
- __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
- piobuf += w - 1;
- last = ((u32 *) ss->sge.vaddr)[w - 1];
- break;
- } else {
- u32 w = len >> 2;
-
- __iowrite32_copy(piobuf, ss->sge.vaddr, w);
- piobuf += w;
-
- extra = len & (sizeof(u32) - 1);
- if (extra) {
- u32 v = ((u32 *) ss->sge.vaddr)[w];
-
- /* Clear unused upper bytes */
- data = clear_upper_bytes(v, extra, 0);
- }
- }
- update_sge(ss, len);
- length -= len;
- }
- /* Update address before sending packet. */
- update_sge(ss, length);
- if (flush_wc) {
- /* must flush everything early, before the trigger word */
- ipath_flush_wc();
- __raw_writel(last, piobuf);
- /* be sure trigger word is written */
- ipath_flush_wc();
- } else
- __raw_writel(last, piobuf);
-}
-
-/*
- * Convert IB rate to delay multiplier.
- */
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
-{
- switch (rate) {
- case IB_RATE_2_5_GBPS: return 8;
- case IB_RATE_5_GBPS: return 4;
- case IB_RATE_10_GBPS: return 2;
- case IB_RATE_20_GBPS: return 1;
- default: return 0;
- }
-}
-
-/*
- * Convert delay multiplier to IB rate
- */
-static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
-{
- switch (mult) {
- case 8: return IB_RATE_2_5_GBPS;
- case 4: return IB_RATE_5_GBPS;
- case 2: return IB_RATE_10_GBPS;
- case 1: return IB_RATE_20_GBPS;
- default: return IB_RATE_PORT_CURRENT;
- }
-}
-
-static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
-{
- struct ipath_verbs_txreq *tx = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- if (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
- }
- spin_unlock_irqrestore(&dev->pending_lock, flags);
- return tx;
-}
-
-static inline void put_txreq(struct ipath_ibdev *dev,
- struct ipath_verbs_txreq *tx)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- list_add(&tx->txreq.list, &dev->txreq_free);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-}
-
-static void sdma_complete(void *cookie, int status)
-{
- struct ipath_verbs_txreq *tx = cookie;
- struct ipath_qp *qp = tx->qp;
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- unsigned long flags;
- enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
-
- if (atomic_dec_and_test(&qp->s_dma_busy)) {
- spin_lock_irqsave(&qp->s_lock, flags);
- if (tx->wqe)
- ipath_send_complete(qp, tx->wqe, ibs);
- if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
- qp->s_last != qp->s_head) ||
- (qp->s_flags & IPATH_S_WAIT_DMA))
- ipath_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- wake_up(&qp->wait_dma);
- } else if (tx->wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- ipath_send_complete(qp, tx->wqe, ibs);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
-
- if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
- kfree(tx->txreq.map_addr);
- put_txreq(dev, tx);
-
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-static void decrement_dma_busy(struct ipath_qp *qp)
-{
- unsigned long flags;
-
- if (atomic_dec_and_test(&qp->s_dma_busy)) {
- spin_lock_irqsave(&qp->s_lock, flags);
- if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
- qp->s_last != qp->s_head) ||
- (qp->s_flags & IPATH_S_WAIT_DMA))
- ipath_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- wake_up(&qp->wait_dma);
- }
-}
-
-/*
- * Compute the number of clock cycles of delay before sending the next packet.
- * The multipliers reflect the number of clocks for the fastest rate so
- * one tick at 4xDDR is 8 ticks at 1xSDR.
- * If the destination port will take longer to receive a packet than
- * the outgoing link can send it, we need to delay sending the next packet
- * by the difference in time it takes the receiver to receive and the sender
- * to send this packet.
- * Note that this delay is always correct for UC and RC but not always
- * optimal for UD. For UD, the destination HCA can be different for each
- * packet, in which case, we could send packets to a different destination
- * while "waiting" for the delay. The overhead for doing this without
- * HW support is more than just paying the cost of delaying some packets
- * unnecessarily.
- */
-static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
-{
- return (rcv_mult > snd_mult) ?
- (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
-}
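/*
 * Illustrative sketch (not part of the original file): with the multipliers
 * from ipath_ib_rate_to_mult() above (4X DDR -> 1, 1X SDR -> 8), a 64-dword
 * packet from a 4X DDR sender to a 1X SDR destination is delayed by
 * (64 * (8 - 1) + 1) >> 1 = 224 fastest-rate ticks, while a destination at
 * least as fast as the sender gets no delay.
 */
#include <assert.h>

static unsigned pkt_delay(unsigned plen, unsigned snd_mult, unsigned rcv_mult)
{
	return (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}

int main(void)
{
	assert(pkt_delay(64, 1, 8) == 224);	/* slow receiver: pace the sender */
	assert(pkt_delay(64, 8, 1) == 0);	/* fast receiver: no pacing needed */
	return 0;
}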
-
-static int ipath_verbs_send_dma(struct ipath_qp *qp,
- struct ipath_ib_header *hdr, u32 hdrwords,
- struct ipath_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ipath_devdata *dd = dev->dd;
- struct ipath_verbs_txreq *tx;
- u32 *piobuf;
- u32 control;
- u32 ndesc;
- int ret;
-
- tx = qp->s_tx;
- if (tx) {
- qp->s_tx = NULL;
- /* resend previously constructed packet */
- atomic_inc(&qp->s_dma_busy);
- ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
- if (ret) {
- qp->s_tx = tx;
- decrement_dma_busy(qp);
- }
- goto bail;
- }
-
- tx = get_txreq(dev);
- if (!tx) {
- ret = -EBUSY;
- goto bail;
- }
-
- /*
- * Get the saved delay count we computed for the previous packet
- * and save the delay count for this packet to be used next time
- * we get here.
- */
- control = qp->s_pkt_delay;
- qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
- tx->qp = qp;
- atomic_inc(&qp->refcount);
- tx->wqe = qp->s_wqe;
- tx->txreq.callback = sdma_complete;
- tx->txreq.callback_cookie = tx;
- tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
- IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
- if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
- tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
-
- /* VL15 packets bypass credit check */
- if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
- control |= 1ULL << 31;
- tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
- }
-
- if (len) {
- /*
- * Don't try to DMA if it takes more descriptors than
- * the queue holds.
- */
- ndesc = ipath_count_sge(ss, len);
- if (ndesc >= dd->ipath_sdma_descq_cnt)
- ndesc = 0;
- } else
- ndesc = 1;
- if (ndesc) {
- tx->hdr.pbc[0] = cpu_to_le32(plen);
- tx->hdr.pbc[1] = cpu_to_le32(control);
- memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
- tx->txreq.sg_count = ndesc;
- tx->map_len = (hdrwords + 2) << 2;
- tx->txreq.map_addr = &tx->hdr;
- atomic_inc(&qp->s_dma_busy);
- ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
- if (ret) {
- /* save ss and length in dwords */
- tx->ss = ss;
- tx->len = dwords;
- qp->s_tx = tx;
- decrement_dma_busy(qp);
- }
- goto bail;
- }
-
- /* Allocate a buffer and copy the header and payload to it. */
- tx->map_len = (plen + 1) << 2;
- piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
- if (unlikely(piobuf == NULL)) {
- ret = -EBUSY;
- goto err_tx;
- }
- tx->txreq.map_addr = piobuf;
- tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
- tx->txreq.sg_count = 1;
-
- *piobuf++ = (__force u32) cpu_to_le32(plen);
- *piobuf++ = (__force u32) cpu_to_le32(control);
- memcpy(piobuf, hdr, hdrwords << 2);
- ipath_copy_from_sge(piobuf + hdrwords, ss, len);
-
- atomic_inc(&qp->s_dma_busy);
- ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
- /*
- * If we couldn't queue the DMA request, save the info
- * and try again later rather than destroying the
- * buffer and undoing the side effects of the copy.
- */
- if (ret) {
- tx->ss = NULL;
- tx->len = 0;
- qp->s_tx = tx;
- decrement_dma_busy(qp);
- }
- dev->n_unaligned++;
- goto bail;
-
-err_tx:
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- put_txreq(dev, tx);
-bail:
- return ret;
-}
-
-static int ipath_verbs_send_pio(struct ipath_qp *qp,
- struct ipath_ib_header *ibhdr, u32 hdrwords,
- struct ipath_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
- u32 *hdr = (u32 *) ibhdr;
- u32 __iomem *piobuf;
- unsigned flush_wc;
- u32 control;
- int ret;
- unsigned long flags;
-
- piobuf = ipath_getpiobuf(dd, plen, NULL);
- if (unlikely(piobuf == NULL)) {
- ret = -EBUSY;
- goto bail;
- }
-
- /*
- * Get the saved delay count we computed for the previous packet
- * and save the delay count for this packet to be used next time
- * we get here.
- */
- control = qp->s_pkt_delay;
- qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
- /* VL15 packets bypass credit check */
- if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
- control |= 1ULL << 31;
-
- /*
- * Write the length to the control qword plus any needed flags.
- * We have to flush after the PBC for correctness on some cpus
- * or WC buffer can be written out of order.
- */
- writeq(((u64) control << 32) | plen, piobuf);
- piobuf += 2;
-
- flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
- if (len == 0) {
- /*
- * If there is just the header portion, must flush before
- * writing last word of header for correctness, and after
- * the last header word (trigger word).
- */
- if (flush_wc) {
- ipath_flush_wc();
- __iowrite32_copy(piobuf, hdr, hdrwords - 1);
- ipath_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
- ipath_flush_wc();
- } else
- __iowrite32_copy(piobuf, hdr, hdrwords);
- goto done;
- }
-
- if (flush_wc)
- ipath_flush_wc();
- __iowrite32_copy(piobuf, hdr, hdrwords);
- piobuf += hdrwords;
-
- /* The common case is aligned and contained in one segment. */
- if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
- !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
- u32 *addr = (u32 *) ss->sge.vaddr;
-
- /* Update address before sending packet. */
- update_sge(ss, len);
- if (flush_wc) {
- __iowrite32_copy(piobuf, addr, dwords - 1);
- /* must flush everything early, before the trigger word */
- ipath_flush_wc();
- __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
- /* be sure trigger word is written */
- ipath_flush_wc();
- } else
- __iowrite32_copy(piobuf, addr, dwords);
- goto done;
- }
- copy_io(piobuf, ss, len, flush_wc);
-done:
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * ipath_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- */
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
- u32 hdrwords, struct ipath_sge_state *ss, u32 len)
-{
- struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
- u32 plen;
- int ret;
- u32 dwords = (len + 3) >> 2;
-
- /*
- * Calculate the send buffer trigger address.
- * The +1 counts for the pbc control dword following the pbc length.
- */
- plen = hdrwords + dwords + 1;
-
- /*
- * VL15 packets (IB_QPT_SMI) will always use PIO, so we
- * can defer SDMA restart until link goes ACTIVE without
- * worrying about just how we got there.
- */
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
- ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
- plen, dwords);
- else
- ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
- plen, dwords);
-
- return ret;
-}
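/*
 * Illustrative sketch (not part of the original file): the length word
 * computed above counts 32-bit words -- header dwords plus the payload
 * rounded up to whole dwords plus one dword for the PBC control word that
 * follows the PBC length.  E.g. an 8-dword header with a 253-byte payload
 * gives dwords = (253 + 3) >> 2 = 64 and plen = 8 + 64 + 1 = 73; the
 * header size here is only an example value.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t verbs_plen(uint32_t hdrwords, uint32_t len)
{
	uint32_t dwords = (len + 3) >> 2;	/* payload rounded up to whole dwords */

	return hdrwords + dwords + 1;		/* +1 for the PBC control dword */
}

int main(void)
{
	assert(verbs_plen(8, 253) == 73);
	return 0;
}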
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait)
-{
- int ret;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
- *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
- *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
- *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
- *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_get_counters(struct ipath_devdata *dd,
- struct ipath_verbs_counters *cntrs)
-{
- struct ipath_cregs const *crp = dd->ipath_cregs;
- int ret;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- cntrs->symbol_error_counter =
- ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
- cntrs->link_error_recovery_counter =
- ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
- /*
- * The link downed counter counts when the other side downs the
- * connection. We add in the number of times we downed the link
- * due to local link integrity errors to compensate.
- */
- cntrs->link_downed_counter =
- ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
- cntrs->port_rcv_errors =
- ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
- ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
- ipath_snap_cntr(dd, crp->cr_portovflcnt) +
- ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
- ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
- ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
- ipath_snap_cntr(dd, crp->cr_erricrccnt) +
- ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
- ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
- ipath_snap_cntr(dd, crp->cr_badformatcnt) +
- dd->ipath_rxfc_unsupvl_errs;
- if (crp->cr_rxotherlocalphyerrcnt)
- cntrs->port_rcv_errors +=
- ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
- if (crp->cr_rxvlerrcnt)
- cntrs->port_rcv_errors +=
- ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
- cntrs->port_rcv_remphys_errors =
- ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
- cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
- cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
- cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
- cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
- cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
- cntrs->local_link_integrity_errors =
- crp->cr_locallinkintegrityerrcnt ?
- ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
- ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
- dd->ipath_lli_errs : dd->ipath_lli_errors);
- cntrs->excessive_buffer_overrun_errors =
- crp->cr_excessbufferovflcnt ?
- ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
- dd->ipath_overrun_thresh_errs;
- cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
- ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_ib_piobufavail - callback when a PIO buffer is available
- * @dev: the device pointer
- *
- * This is called from ipath_intr() at interrupt level when a PIO buffer is
- * available after ipath_verbs_send() returned an error that no buffers were
- * available. Return 1 if we consumed all the PIO buffers and we still have
- * QPs waiting for buffers (for now, just restart the send tasklet and
- * return zero).
- */
-int ipath_ib_piobufavail(struct ipath_ibdev *dev)
-{
- struct list_head *list;
- struct ipath_qp *qplist;
- struct ipath_qp *qp;
- unsigned long flags;
-
- if (dev == NULL)
- goto bail;
-
- list = &dev->piowait;
- qplist = NULL;
-
- spin_lock_irqsave(&dev->pending_lock, flags);
- while (!list_empty(list)) {
- qp = list_entry(list->next, struct ipath_qp, piowait);
- list_del_init(&qp->piowait);
- qp->pio_next = qplist;
- qplist = qp;
- atomic_inc(&qp->refcount);
- }
- spin_unlock_irqrestore(&dev->pending_lock, flags);
-
- while (qplist != NULL) {
- qp = qplist;
- qplist = qp->pio_next;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
- ipath_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Notify ipath_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-
-bail:
- return 0;
-}
-
-static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct ipath_ibdev *dev = to_idev(ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- memset(props, 0, sizeof(*props));
-
- props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- props->page_size_cap = PAGE_SIZE;
- props->vendor_id =
- IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
- props->vendor_part_id = dev->dd->ipath_deviceid;
- props->hw_ver = dev->dd->ipath_pcirev;
-
- props->sys_image_guid = dev->sys_image_guid;
-
- props->max_mr_size = ~0ull;
- props->max_qp = ib_ipath_max_qps;
- props->max_qp_wr = ib_ipath_max_qp_wrs;
- props->max_sge = ib_ipath_max_sges;
- props->max_sge_rd = ib_ipath_max_sges;
- props->max_cq = ib_ipath_max_cqs;
- props->max_ah = ib_ipath_max_ahs;
- props->max_cqe = ib_ipath_max_cqes;
- props->max_mr = dev->lk_table.max;
- props->max_fmr = dev->lk_table.max;
- props->max_map_per_fmr = 32767;
- props->max_pd = ib_ipath_max_pds;
- props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
- props->max_qp_init_rd_atom = 255;
- /* props->max_res_rd_atom */
- props->max_srq = ib_ipath_max_srqs;
- props->max_srq_wr = ib_ipath_max_srq_wrs;
- props->max_srq_sge = ib_ipath_max_srq_sges;
- /* props->local_ca_ack_delay */
- props->atomic_cap = IB_ATOMIC_GLOB;
- props->max_pkeys = ipath_get_npkeys(dev->dd);
- props->max_mcast_grp = ib_ipath_max_mcast_grps;
- props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
- props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
- props->max_mcast_grp;
-
- return 0;
-}
-
-const u8 ipath_cvt_physportstate[32] = {
- [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
-{
- return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-static int ipath_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_devdata *dd = dev->dd;
- enum ib_mtu mtu;
- u16 lid = dd->ipath_lid;
- u64 ibcstat;
-
- memset(props, 0, sizeof(*props));
- props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
- props->lmc = dd->ipath_lmc;
- props->sm_lid = dev->sm_lid;
- props->sm_sl = dev->sm_sl;
- ibcstat = dd->ipath_lastibcstat;
- /* map LinkState to IB portinfo values. */
- props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
-
- /* See phys_state_show() */
- props->phys_state = /* MEA: assumes shift == 0 */
- ipath_cvt_physportstate[dd->ipath_lastibcstat &
- dd->ibcs_lts_mask];
- props->port_cap_flags = dev->port_cap_flags;
- props->gid_tbl_len = 1;
- props->max_msg_sz = 0x80000000;
- props->pkey_tbl_len = ipath_get_npkeys(dd);
- props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
- dev->z_pkey_violations;
- props->qkey_viol_cntr = dev->qkey_violations;
- props->active_width = dd->ipath_link_width_active;
- /* See rate_show() */
- props->active_speed = dd->ipath_link_speed_active;
- props->max_vl_num = 1; /* VLCap = VL0 */
- props->init_type_reply = 0;
-
- props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
- switch (dd->ipath_ibmtu) {
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- default:
- mtu = IB_MTU_2048;
- }
- props->active_mtu = mtu;
- props->subnet_timeout = dev->subnet_timeout;
-
- return 0;
-}
-
-static int ipath_modify_device(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- int ret;
-
- if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
- IB_DEVICE_MODIFY_NODE_DESC)) {
- ret = -EOPNOTSUPP;
- goto bail;
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
- memcpy(device->node_desc, device_modify->node_desc, 64);
-
- if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
- to_idev(device)->sys_image_guid =
- cpu_to_be64(device_modify->sys_image_guid);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int ipath_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- struct ipath_ibdev *dev = to_idev(ibdev);
-
- dev->port_cap_flags |= props->set_port_cap_mask;
- dev->port_cap_flags &= ~props->clr_port_cap_mask;
- if (port_modify_mask & IB_PORT_SHUTDOWN)
- ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
- if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
- dev->qkey_violations = 0;
- return 0;
-}
-
-static int ipath_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct ipath_ibdev *dev = to_idev(ibdev);
- int ret;
-
- if (index >= 1) {
- ret = -EINVAL;
- goto bail;
- }
- gid->global.subnet_prefix = dev->gid_prefix;
- gid->global.interface_id = dev->dd->ipath_guid;
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_pd *pd;
- struct ib_pd *ret;
-
- /*
- * This is actually totally arbitrary. Some correctness tests
- * assume there's a maximum number of PDs that can be allocated.
- * We don't actually have this limit, but we fail the test if
- * we allow allocations of more than we report for this value.
- */
-
- pd = kmalloc(sizeof *pd, GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock(&dev->n_pds_lock);
- if (dev->n_pds_allocated == ib_ipath_max_pds) {
- spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_pds_allocated++;
- spin_unlock(&dev->n_pds_lock);
-
- /* ib_alloc_pd() will initialize pd->ibpd. */
- pd->user = udata != NULL;
-
- ret = &pd->ibpd;
-
-bail:
- return ret;
-}
-
-static int ipath_dealloc_pd(struct ib_pd *ibpd)
-{
- struct ipath_pd *pd = to_ipd(ibpd);
- struct ipath_ibdev *dev = to_idev(ibpd->device);
-
- spin_lock(&dev->n_pds_lock);
- dev->n_pds_allocated--;
- spin_unlock(&dev->n_pds_lock);
-
- kfree(pd);
-
- return 0;
-}
-
-/**
- * ipath_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
-{
- struct ipath_ah *ah;
- struct ib_ah *ret;
- struct ipath_ibdev *dev = to_idev(pd->device);
- unsigned long flags;
-
- /* A multicast address requires a GRH (see ch. 8.4.1). */
- if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
- ah_attr->dlid != IPATH_PERMISSIVE_LID &&
- !(ah_attr->ah_flags & IB_AH_GRH)) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- if (ah_attr->dlid == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- if (ah_attr->port_num < 1 ||
- ah_attr->port_num > pd->device->phys_port_cnt) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- ah = kmalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- kfree(ah);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_ahs_allocated++;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- /* ib_create_ah() will initialize ah->ibah. */
- ah->attr = *ah_attr;
- ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
-
- ret = &ah->ibah;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int ipath_destroy_ah(struct ib_ah *ibah)
-{
- struct ipath_ibdev *dev = to_idev(ibah->device);
- struct ipath_ah *ah = to_iah(ibah);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- dev->n_ahs_allocated--;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- kfree(ah);
-
- return 0;
-}
-
-static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct ipath_ah *ah = to_iah(ibah);
-
- *ah_attr = ah->attr;
- ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
-
- return 0;
-}
-
-/**
- * ipath_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_get_npkeys(struct ipath_devdata *dd)
-{
- return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-/**
- * ipath_get_pkey - return the indexed PKEY from the port PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
- unsigned ret;
-
- /* always a kernel port, no locking needed */
- if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
- ret = 0;
- else
- ret = dd->ipath_pd[0]->port_pkeys[index];
-
- return ret;
-}
-
-static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- struct ipath_ibdev *dev = to_idev(ibdev);
- int ret;
-
- if (index >= ipath_get_npkeys(dev->dd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- *pkey = ipath_get_pkey(dev->dd, index);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_alloc_ucontext - allocate a ucontext
- * @ibdev: the infiniband device
- * @udata: not used by the InfiniPath driver
- */
-
-static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct ipath_ucontext *context;
- struct ib_ucontext *ret;
-
- context = kmalloc(sizeof *context, GFP_KERNEL);
- if (!context) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- ret = &context->ibucontext;
-
-bail:
- return ret;
-}
-
-static int ipath_dealloc_ucontext(struct ib_ucontext *context)
-{
- kfree(to_iucontext(context));
- return 0;
-}
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev);
-
-static void __verbs_timer(unsigned long arg)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
- /* Handle verbs layer timeouts. */
- ipath_ib_timer(dd->verbs_dev);
-
- mod_timer(&dd->verbs_timer, jiffies + 1);
-}
-
-static int enable_timer(struct ipath_devdata *dd)
-{
- /*
- * Early chips had a design flaw where the chip and kernel idea
- * of the tail register don't always agree, and therefore we won't
- * get an interrupt on the next packet received.
- * If the board supports per packet receive interrupts, use it.
- * Otherwise, the timer function periodically checks for packets
- * to cover this case.
- * Either way, the timer is needed for verbs layer related
- * processing.
- */
- if (dd->ipath_flags & IPATH_GPIO_INTR) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
- 0x2074076542310ULL);
- /* Enable GPIO bit 2 interrupt */
- dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- }
-
- setup_timer(&dd->verbs_timer, __verbs_timer, (unsigned long)dd);
-
- dd->verbs_timer.expires = jiffies + 1;
- add_timer(&dd->verbs_timer);
-
- return 0;
-}
-
-static int disable_timer(struct ipath_devdata *dd)
-{
- /* Disable GPIO bit 2 interrupt */
- if (dd->ipath_flags & IPATH_GPIO_INTR) {
- dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- /*
- * We might want to undo changes to debugportselect,
- * but how?
- */
- }
-
- del_timer_sync(&dd->verbs_timer);
-
- return 0;
-}
-
-static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = ipath_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-}
-
-/**
- * ipath_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return zero on success or a negative errno on failure; the allocated
- * ipath_ibdev (or NULL on failure) is stored in dd->verbs_dev.
- */
-int ipath_register_ib_device(struct ipath_devdata *dd)
-{
- struct ipath_verbs_counters cntrs;
- struct ipath_ibdev *idev;
- struct ib_device *dev;
- struct ipath_verbs_txreq *tx;
- unsigned i;
- int ret;
-
- idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
- if (idev == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
-
- dev = &idev->ibdev;
-
- if (dd->ipath_sdma_descq_cnt) {
- tx = kmalloc_array(dd->ipath_sdma_descq_cnt, sizeof *tx,
- GFP_KERNEL);
- if (tx == NULL) {
- ret = -ENOMEM;
- goto err_tx;
- }
- } else
- tx = NULL;
- idev->txreq_bufs = tx;
-
- /* Only need to initialize non-zero fields. */
- spin_lock_init(&idev->n_pds_lock);
- spin_lock_init(&idev->n_ahs_lock);
- spin_lock_init(&idev->n_cqs_lock);
- spin_lock_init(&idev->n_qps_lock);
- spin_lock_init(&idev->n_srqs_lock);
- spin_lock_init(&idev->n_mcast_grps_lock);
-
- spin_lock_init(&idev->qp_table.lock);
- spin_lock_init(&idev->lk_table.lock);
- idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
- /* Set the prefix to the default value (see ch. 4.1.1) */
- idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
-
- ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
- if (ret)
- goto err_qp;
-
- /*
- * The top ib_ipath_lkey_table_size bits are used to index the
- * table. The lower 8 bits can be owned by the user (copied from
- * the LKEY). The remaining bits act as a generation number or tag.
- */
- idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
- idev->lk_table.table = kcalloc(idev->lk_table.max,
- sizeof(*idev->lk_table.table),
- GFP_KERNEL);
- if (idev->lk_table.table == NULL) {
- ret = -ENOMEM;
- goto err_lk;
- }
- INIT_LIST_HEAD(&idev->pending_mmaps);
- spin_lock_init(&idev->pending_lock);
- idev->mmap_offset = PAGE_SIZE;
- spin_lock_init(&idev->mmap_offset_lock);
- INIT_LIST_HEAD(&idev->pending[0]);
- INIT_LIST_HEAD(&idev->pending[1]);
- INIT_LIST_HEAD(&idev->pending[2]);
- INIT_LIST_HEAD(&idev->piowait);
- INIT_LIST_HEAD(&idev->rnrwait);
- INIT_LIST_HEAD(&idev->txreq_free);
- idev->pending_index = 0;
- idev->port_cap_flags =
- IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
- if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
- idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
- idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
- /* Snapshot current HW counters to "clear" them. */
- ipath_get_counters(dd, &cntrs);
- idev->z_symbol_error_counter = cntrs.symbol_error_counter;
- idev->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- idev->z_link_downed_counter = cntrs.link_downed_counter;
- idev->z_port_rcv_errors = cntrs.port_rcv_errors;
- idev->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
- idev->z_port_xmit_discards = cntrs.port_xmit_discards;
- idev->z_port_xmit_data = cntrs.port_xmit_data;
- idev->z_port_rcv_data = cntrs.port_rcv_data;
- idev->z_port_xmit_packets = cntrs.port_xmit_packets;
- idev->z_port_rcv_packets = cntrs.port_rcv_packets;
- idev->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- idev->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- idev->z_vl15_dropped = cntrs.vl15_dropped;
-
- for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
- list_add(&tx->txreq.list, &idev->txreq_free);
-
- /*
- * The system image GUID is supposed to be the same for all
- * IB HCAs in a single system but since there can be other
- * device types in the system, we can't be sure this is unique.
- */
- if (!sys_image_guid)
- sys_image_guid = dd->ipath_guid;
- idev->sys_image_guid = sys_image_guid;
- idev->ib_unit = dd->ipath_unit;
- idev->dd = dd;
-
- strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
- dev->owner = THIS_MODULE;
- dev->node_guid = dd->ipath_guid;
- dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
- dev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
- dev->node_type = RDMA_NODE_IB_CA;
- dev->phys_port_cnt = 1;
- dev->num_comp_vectors = 1;
- dev->dma_device = &dd->pcidev->dev;
- dev->query_device = ipath_query_device;
- dev->modify_device = ipath_modify_device;
- dev->query_port = ipath_query_port;
- dev->modify_port = ipath_modify_port;
- dev->query_pkey = ipath_query_pkey;
- dev->query_gid = ipath_query_gid;
- dev->alloc_ucontext = ipath_alloc_ucontext;
- dev->dealloc_ucontext = ipath_dealloc_ucontext;
- dev->alloc_pd = ipath_alloc_pd;
- dev->dealloc_pd = ipath_dealloc_pd;
- dev->create_ah = ipath_create_ah;
- dev->destroy_ah = ipath_destroy_ah;
- dev->query_ah = ipath_query_ah;
- dev->create_srq = ipath_create_srq;
- dev->modify_srq = ipath_modify_srq;
- dev->query_srq = ipath_query_srq;
- dev->destroy_srq = ipath_destroy_srq;
- dev->create_qp = ipath_create_qp;
- dev->modify_qp = ipath_modify_qp;
- dev->query_qp = ipath_query_qp;
- dev->destroy_qp = ipath_destroy_qp;
- dev->post_send = ipath_post_send;
- dev->post_recv = ipath_post_receive;
- dev->post_srq_recv = ipath_post_srq_receive;
- dev->create_cq = ipath_create_cq;
- dev->destroy_cq = ipath_destroy_cq;
- dev->resize_cq = ipath_resize_cq;
- dev->poll_cq = ipath_poll_cq;
- dev->req_notify_cq = ipath_req_notify_cq;
- dev->get_dma_mr = ipath_get_dma_mr;
- dev->reg_user_mr = ipath_reg_user_mr;
- dev->dereg_mr = ipath_dereg_mr;
- dev->alloc_fmr = ipath_alloc_fmr;
- dev->map_phys_fmr = ipath_map_phys_fmr;
- dev->unmap_fmr = ipath_unmap_fmr;
- dev->dealloc_fmr = ipath_dealloc_fmr;
- dev->attach_mcast = ipath_multicast_attach;
- dev->detach_mcast = ipath_multicast_detach;
- dev->process_mad = ipath_process_mad;
- dev->mmap = ipath_mmap;
- dev->dma_ops = &ipath_dma_mapping_ops;
- dev->get_port_immutable = ipath_port_immutable;
-
- snprintf(dev->node_desc, sizeof(dev->node_desc),
- IPATH_IDSTR " %s", init_utsname()->nodename);
-
- ret = ib_register_device(dev, NULL);
- if (ret)
- goto err_reg;
-
- ret = ipath_verbs_register_sysfs(dev);
- if (ret)
- goto err_class;
-
- enable_timer(dd);
-
- goto bail;
-
-err_class:
- ib_unregister_device(dev);
-err_reg:
- kfree(idev->lk_table.table);
-err_lk:
- kfree(idev->qp_table.table);
-err_qp:
- kfree(idev->txreq_bufs);
-err_tx:
- ib_dealloc_device(dev);
- ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
- idev = NULL;
-
-bail:
- dd->verbs_dev = idev;
- return ret;
-}
-
-void ipath_unregister_ib_device(struct ipath_ibdev *dev)
-{
- struct ib_device *ibdev = &dev->ibdev;
- u32 qps_inuse;
-
- ib_unregister_device(ibdev);
-
- disable_timer(dev->dd);
-
- if (!list_empty(&dev->pending[0]) ||
- !list_empty(&dev->pending[1]) ||
- !list_empty(&dev->pending[2]))
- ipath_dev_err(dev->dd, "pending list not empty!\n");
- if (!list_empty(&dev->piowait))
- ipath_dev_err(dev->dd, "piowait list not empty!\n");
- if (!list_empty(&dev->rnrwait))
- ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
- if (!ipath_mcast_tree_empty())
- ipath_dev_err(dev->dd, "multicast table memory leak!\n");
- /*
- * Note that ipath_unregister_ib_device() can be called before all
- * the QPs are destroyed!
- */
- qps_inuse = ipath_free_all_qps(&dev->qp_table);
- if (qps_inuse)
- ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
- qps_inuse);
- kfree(dev->qp_table.table);
- kfree(dev->lk_table.table);
- kfree(dev->txreq_bufs);
- ib_dealloc_device(ibdev);
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ipath_ibdev *dev =
- container_of(device, struct ipath_ibdev, ibdev.dev);
-
- return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ipath_ibdev *dev =
- container_of(device, struct ipath_ibdev, ibdev.dev);
- int ret;
-
- ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
- if (ret < 0)
- goto bail;
- strcat(buf, "\n");
- ret = strlen(buf);
-
-bail:
- return ret;
-}
-
-static ssize_t show_stats(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ipath_ibdev *dev =
- container_of(device, struct ipath_ibdev, ibdev.dev);
- int i;
- int len;
-
- len = sprintf(buf,
- "RC resends %d\n"
- "RC no QACK %d\n"
- "RC ACKs %d\n"
- "RC SEQ NAKs %d\n"
- "RC RDMA seq %d\n"
- "RC RNR NAKs %d\n"
- "RC OTH NAKs %d\n"
- "RC timeouts %d\n"
- "RC RDMA dup %d\n"
- "piobuf wait %d\n"
- "unaligned %d\n"
- "PKT drops %d\n"
- "WQE errs %d\n",
- dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
- dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
- dev->n_other_naks, dev->n_timeouts,
- dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
- dev->n_pkt_drops, dev->n_wqe_errs);
- for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
- const struct ipath_opcode_stats *si = &dev->opstats[i];
-
- if (!si->n_packets && !si->n_bytes)
- continue;
- len += sprintf(buf + len, "%02x %llu/%llu\n", i,
- (unsigned long long) si->n_packets,
- (unsigned long long) si->n_bytes);
- }
- return len;
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
-
-static struct device_attribute *ipath_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_stats
-};
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
- ret = device_create_file(&dev->dev,
- ipath_class_attributes[i]);
- if (ret)
- goto bail;
- }
- return 0;
-bail:
- for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
- device_remove_file(&dev->dev, ipath_class_attributes[i]);
- return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
deleted file mode 100644
index 6c70a89667a9..000000000000
--- a/drivers/staging/rdma/ipath/ipath_verbs.h
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef IPATH_VERBS_H
-#define IPATH_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-
-#include "ipath_kernel.h"
-
-#define IPATH_MAX_RDMA_ATOMIC 4
-
-#define QPN_MAX (1 << 24)
-#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define IPATH_UVERBS_ABI_VERSION 2
-
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK 0x20
-#define IB_NAK_PSN_ERROR 0x60
-#define IB_NAK_INVALID_REQUEST 0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST 0x64
-
-/* Flags for checking QP state (see ib_ipath_state_ops[]) */
-#define IPATH_POST_SEND_OK 0x01
-#define IPATH_POST_RECV_OK 0x02
-#define IPATH_PROCESS_RECV_OK 0x04
-#define IPATH_PROCESS_SEND_OK 0x08
-#define IPATH_PROCESS_NEXT_SEND_OK 0x10
-#define IPATH_FLUSH_SEND 0x20
-#define IPATH_FLUSH_RECV 0x40
-#define IPATH_PROCESS_OR_FLUSH_SEND \
- (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE 0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
-
-struct ib_reth {
- __be64 vaddr;
- __be32 rkey;
- __be32 length;
-} __attribute__ ((packed));
-
-struct ib_atomic_eth {
- __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
- __be32 rkey;
- __be64 swap_data;
- __be64 compare_data;
-} __attribute__ ((packed));
-
-struct ipath_other_headers {
- __be32 bth[3];
- union {
- struct {
- __be32 deth[2];
- __be32 imm_data;
- } ud;
- struct {
- struct ib_reth reth;
- __be32 imm_data;
- } rc;
- struct {
- __be32 aeth;
- __be32 atomic_ack_eth[2];
- } at;
- __be32 imm_data;
- __be32 aeth;
- struct ib_atomic_eth atomic_eth;
- } u;
-} __attribute__ ((packed));
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data). Only the first 56 bytes of the IB header
- * will be in the eager header buffer. The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct ipath_ib_header {
- __be16 lrh[4];
- union {
- struct {
- struct ib_grh grh;
- struct ipath_other_headers oth;
- } l;
- struct ipath_other_headers oth;
- } u;
-} __attribute__ ((packed));
-
-struct ipath_pio_header {
- __le32 pbc[2];
- struct ipath_ib_header hdr;
-} __attribute__ ((packed));
-
-/*
- * There is one struct ipath_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct ipath_mcast_qp.
- */
-struct ipath_mcast_qp {
- struct list_head list;
- struct ipath_qp *qp;
-};
-
-struct ipath_mcast {
- struct rb_node rb_node;
- union ib_gid mgid;
- struct list_head qp_list;
- wait_queue_head_t wait;
- atomic_t refcount;
- int n_attached;
-};
-
-/* Protection domain */
-struct ipath_pd {
- struct ib_pd ibpd;
- int user; /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct ipath_ah {
- struct ib_ah ibah;
- struct ib_ah_attr attr;
-};
-
-/*
- * This structure is used by ipath_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct ipath_mmap_info {
- struct list_head pending_mmaps;
- struct ib_ucontext *context;
- void *obj;
- __u64 offset;
- struct kref ref;
- unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct ipath_cq_wc {
- u32 head; /* index of next entry to fill */
- u32 tail; /* index of next ib_poll_cq() entry */
- union {
- /* these are actually size ibcq.cqe + 1 */
- struct ib_uverbs_wc uqueue[0];
- struct ib_wc kqueue[0];
- };
-};
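Since the indices and the entry array share one allocation, the kernel-side consumer is plain ring arithmetic over ibcq.cqe + 1 slots. A simplified sketch of the pop side (ipath_cq_pop is an illustrative name; the real poll path additionally takes the CQ lock and handles the user-mapped uqueue variant):

static int ipath_cq_pop(struct ipath_cq_wc *wc, u32 cqe, struct ib_wc *entry)
{
	u32 tail = wc->tail;

	if (tail == wc->head)
		return 0;			/* ring is empty */
	*entry = wc->kqueue[tail];
	/* the arrays really hold cqe + 1 entries, so wrap after index cqe */
	wc->tail = (tail >= cqe) ? 0 : tail + 1;
	return 1;
}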
-
-/*
- * The completion queue structure.
- */
-struct ipath_cq {
- struct ib_cq ibcq;
- struct tasklet_struct comptask;
- spinlock_t lock;
- u8 notify;
- u8 triggered;
- struct ipath_cq_wc *queue;
- struct ipath_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
-
-struct ipath_segarray {
- struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
- struct ib_pd *pd; /* shares refcnt of ibmr.pd */
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of ipath_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- struct ipath_segarray *map[0]; /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
- struct ipath_mregion *mr;
- void *vaddr; /* kernel virtual address of segment */
- u32 sge_length; /* length of the SGE */
- u32 length; /* remaining length of the segment */
- u16 m; /* current index: mr->map[m] */
- u16 n; /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct ipath_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct ipath_mregion mr; /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct ipath_swqe {
- union {
- struct ib_send_wr wr; /* don't use wr.sg_list */
- struct ib_ud_wr ud_wr;
- struct ib_rdma_wr rdma_wr;
- struct ib_atomic_wr atomic_wr;
- };
-
- u32 psn; /* first packet sequence number */
- u32 lpsn; /* last packet sequence number */
- u32 ssn; /* send sequence number */
- u32 length; /* total length of data in sg_list */
- struct ipath_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct ipath_rwqe {
- u64 wr_id;
- u8 num_sge;
- struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct ipath_rwq {
- u32 head; /* new work requests posted to the head */
- u32 tail; /* receives pull requests from here. */
- struct ipath_rwqe wq[0];
-};
-
-struct ipath_rq {
- struct ipath_rwq *wq;
- spinlock_t lock;
- u32 size; /* size of RWQE array */
- u8 max_sge;
-};
-
-struct ipath_srq {
- struct ib_srq ibsrq;
- struct ipath_rq rq;
- struct ipath_mmap_info *ip;
- /* send signal when number of RWQEs < limit */
- u32 limit;
-};
-
-struct ipath_sge_state {
- struct ipath_sge *sg_list; /* next SGE to be used if any */
- struct ipath_sge sge; /* progress state for the current SGE */
- u8 num_sge;
- u8 static_rate;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct ipath_ack_entry {
- u8 opcode;
- u8 sent;
- u32 psn;
- union {
- struct ipath_sge_state rdma_sge;
- u64 atomic_data;
- };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct ipath_qp {
- struct ib_qp ibqp;
- struct ipath_qp *next; /* link list for QPN hash table */
- struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */
- struct ipath_qp *pio_next; /* link for ipath_ib_piobufavail() */
- struct list_head piowait; /* link for wait PIO buf */
- struct list_head timerwait; /* link for waiting for timeouts */
- struct ib_ah_attr remote_ah_attr;
- struct ipath_ib_header s_hdr; /* next packet header to send */
- atomic_t refcount;
- wait_queue_head_t wait;
- wait_queue_head_t wait_dma;
- struct tasklet_struct s_task;
- struct ipath_mmap_info *ip;
- struct ipath_sge_state *s_cur_sge;
- struct ipath_verbs_txreq *s_tx;
- struct ipath_sge_state s_sge; /* current send request data */
- struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
- struct ipath_sge_state s_ack_rdma_sge;
- struct ipath_sge_state s_rdma_read_sge;
- struct ipath_sge_state r_sge; /* current receive data */
- spinlock_t s_lock;
- atomic_t s_dma_busy;
- u16 s_pkt_delay;
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
- u32 s_cur_size; /* size of send packet in bytes */
- u32 s_len; /* total length of s_sge */
- u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
- u32 s_next_psn; /* PSN for next request */
- u32 s_last_psn; /* last response PSN processed */
- u32 s_psn; /* current packet sequence number */
- u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
- u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
- u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
- u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
- u64 r_wr_id; /* ID for current receive WQE */
- unsigned long r_aflags;
- u32 r_len; /* total length of r_sge */
- u32 r_rcv_len; /* receive data len processed */
- u32 r_psn; /* expected rcv packet sequence number */
- u32 r_msn; /* message sequence number */
- u8 state; /* QP state */
- u8 s_state; /* opcode of last packet sent */
- u8 s_ack_state; /* opcode of packet to ACK */
- u8 s_nak_state; /* non-zero if NAK is pending */
- u8 r_state; /* opcode of last packet received */
- u8 r_nak_state; /* non-zero if NAK is pending */
- u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
- u8 r_flags;
- u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
- u8 r_head_ack_queue; /* index into s_ack_queue[] */
- u8 qp_access_flags;
- u8 s_max_sge; /* size of s_wq->sg_list */
- u8 s_retry_cnt; /* number of times to retry */
- u8 s_rnr_retry_cnt;
- u8 s_retry; /* requester retry counter */
- u8 s_rnr_retry; /* requester RNR retry counter */
- u8 s_pkey_index; /* PKEY index to use */
- u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
- u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
- u8 s_tail_ack_queue; /* index into s_ack_queue[] */
- u8 s_flags;
- u8 s_dmult;
- u8 s_draining;
- u8 timeout; /* Timeout for this QP */
- enum ib_mtu path_mtu;
- u32 remote_qpn;
- u32 qkey; /* QKEY for this QP (for UD or RD) */
- u32 s_size; /* send work queue size */
- u32 s_head; /* new entries added here */
- u32 s_tail; /* next entry to process */
- u32 s_cur; /* current work queue entry */
- u32 s_last; /* last un-ACK'ed entry */
- u32 s_ssn; /* SSN of tail entry */
- u32 s_lsn; /* limit sequence number (credit) */
- struct ipath_swqe *s_wq; /* send work queue */
- struct ipath_swqe *s_wqe;
- struct ipath_sge *r_ud_sg_list;
- struct ipath_rq r_rq; /* receive work queue */
- struct ipath_sge r_sg_list[0]; /* verified SGEs */
-};
-
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define IPATH_R_WRID_VALID 0
-
-/*
- * Bit definitions for r_flags.
- */
-#define IPATH_R_REUSE_SGE 0x01
-#define IPATH_R_RDMAR_SEQ 0x02
-
-/*
- * Bit definitions for s_flags.
- *
- * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
- * before processing the next SWQE
- * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
- * before processing the next SWQE
- * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
- * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- * next send completion entry not via send DMA.
- */
-#define IPATH_S_SIGNAL_REQ_WR 0x01
-#define IPATH_S_FENCE_PENDING 0x02
-#define IPATH_S_RDMAR_PENDING 0x04
-#define IPATH_S_ACK_PENDING 0x08
-#define IPATH_S_BUSY 0x10
-#define IPATH_S_WAITING 0x20
-#define IPATH_S_WAIT_SSN_CREDIT 0x40
-#define IPATH_S_WAIT_DMA 0x80
-
-#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
- IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
-
-#define IPATH_PSN_CREDIT 512
-
-/*
- * Since struct ipath_swqe is not a fixed size, we can't simply index into
- * struct ipath_qp.s_wq. This function does the array index computation.
- */
-static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
- unsigned n)
-{
- return (struct ipath_swqe *)((char *)qp->s_wq +
- (sizeof(struct ipath_swqe) +
- qp->s_max_sge *
- sizeof(struct ipath_sge)) * n);
-}
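Because send WQEs are variable-sized, any walk over the send queue has to go through get_swqe_ptr() rather than plain array indexing. A minimal sketch of such a walk, assuming the caller already holds the QP's s_lock (ipath_count_signaled_swqes is an illustrative name; IB_SEND_SIGNALED comes from the core verbs headers):

static unsigned ipath_count_signaled_swqes(struct ipath_qp *qp)
{
	unsigned n = qp->s_last;
	unsigned count = 0;

	while (n != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, n);

		if (wqe->wr.send_flags & IB_SEND_SIGNALED)
			count++;
		if (++n >= qp->s_size)
			n = 0;
	}
	return count;
}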
-
-/*
- * Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rwq.wq. This function does the array index computation.
- */
-static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
- unsigned n)
-{
- return (struct ipath_rwqe *)
- ((char *) rq->wq->wq +
- (sizeof(struct ipath_rwqe) +
- rq->max_sge * sizeof(struct ib_sge)) * n);
-}
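Receive queue slots are reached the same way, through get_rwqe_ptr(). A sketch of copying a posted work request into the slot at the ring's head, assuming rq->lock is held and num_sge has already been checked against max_sge (ipath_fill_rwqe is an illustrative helper name):

static void ipath_fill_rwqe(struct ipath_rq *rq, u32 head,
			    const struct ib_recv_wr *wr)
{
	struct ipath_rwqe *wqe = get_rwqe_ptr(rq, head);
	int i;

	wqe->wr_id = wr->wr_id;
	wqe->num_sge = wr->num_sge;
	for (i = 0; i < wr->num_sge; i++)
		wqe->sg_list[i] = wr->sg_list[i];
}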
-
-/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
- atomic_t n_free;
- void *page;
-};
-
-struct ipath_qp_table {
- spinlock_t lock;
- u32 last; /* last QP number allocated */
- u32 max; /* size of the hash table */
- u32 nmaps; /* size of the map table */
- struct ipath_qp **table;
- /* bit map of free numbers */
- struct qpn_map map[QPNMAP_ENTRIES];
-};
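The lazy, race-tolerant page allocation described above can be sketched as follows (illustrative only; if the allocation fails the map page simply stays NULL, and a racing winner's page is kept):

static void ipath_get_map_page(struct ipath_qp_table *qpt,
			       struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);	/* lost the race; keep the existing page */
	else
		map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}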
-
-struct ipath_lkey_table {
- spinlock_t lock;
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
- u32 max; /* size of the table */
- struct ipath_mregion **table;
-};
-
-struct ipath_opcode_stats {
- u64 n_packets; /* number of packets */
- u64 n_bytes; /* total number of bytes */
-};
-
-struct ipath_ibdev {
- struct ib_device ibdev;
- struct ipath_devdata *dd;
- struct list_head pending_mmaps;
- spinlock_t mmap_offset_lock;
- u32 mmap_offset;
- int ib_unit; /* This is the device number */
- u16 sm_lid; /* in host order */
- u8 sm_sl;
- u8 mkeyprot;
- /* non-zero when timer is set */
- unsigned long mkey_lease_timeout;
-
- /* The following fields are really per port. */
- struct ipath_qp_table qp_table;
- struct ipath_lkey_table lk_table;
- struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */
- struct list_head piowait; /* list for wait PIO buf */
- struct list_head txreq_free;
- void *txreq_bufs;
- /* list of QPs waiting for RNR timer */
- struct list_head rnrwait;
- spinlock_t pending_lock;
- __be64 sys_image_guid; /* in network order */
- __be64 gid_prefix; /* in network order */
- __be64 mkey;
-
- u32 n_pds_allocated; /* number of PDs allocated for device */
- spinlock_t n_pds_lock;
- u32 n_ahs_allocated; /* number of AHs allocated for device */
- spinlock_t n_ahs_lock;
- u32 n_cqs_allocated; /* number of CQs allocated for device */
- spinlock_t n_cqs_lock;
- u32 n_qps_allocated; /* number of QPs allocated for device */
- spinlock_t n_qps_lock;
- u32 n_srqs_allocated; /* number of SRQs allocated for device */
- spinlock_t n_srqs_lock;
- u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
- spinlock_t n_mcast_grps_lock;
-
- u64 ipath_sword; /* total dwords sent (sample result) */
- u64 ipath_rword; /* total dwords received (sample result) */
- u64 ipath_spkts; /* total packets sent (sample result) */
- u64 ipath_rpkts; /* total packets received (sample result) */
- /* # of ticks no data sent (sample result) */
- u64 ipath_xmit_wait;
- u64 rcv_errors; /* # of packets with SW detected rcv errs */
- u64 n_unicast_xmit; /* total unicast packets sent */
- u64 n_unicast_rcv; /* total unicast packets received */
- u64 n_multicast_xmit; /* total multicast packets sent */
- u64 n_multicast_rcv; /* total multicast packets received */
- u64 z_symbol_error_counter; /* starting count for PMA */
- u64 z_link_error_recovery_counter; /* starting count for PMA */
- u64 z_link_downed_counter; /* starting count for PMA */
- u64 z_port_rcv_errors; /* starting count for PMA */
- u64 z_port_rcv_remphys_errors; /* starting count for PMA */
- u64 z_port_xmit_discards; /* starting count for PMA */
- u64 z_port_xmit_data; /* starting count for PMA */
- u64 z_port_rcv_data; /* starting count for PMA */
- u64 z_port_xmit_packets; /* starting count for PMA */
- u64 z_port_rcv_packets; /* starting count for PMA */
- u32 z_pkey_violations; /* starting count for PMA */
- u32 z_local_link_integrity_errors; /* starting count for PMA */
- u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
- u32 z_vl15_dropped; /* starting count for PMA */
- u32 n_rc_resends;
- u32 n_rc_acks;
- u32 n_rc_qacks;
- u32 n_seq_naks;
- u32 n_rdma_seq;
- u32 n_rnr_naks;
- u32 n_other_naks;
- u32 n_timeouts;
- u32 n_pkt_drops;
- u32 n_vl15_dropped;
- u32 n_wqe_errs;
- u32 n_rdma_dup_busy;
- u32 n_piowait;
- u32 n_unaligned;
- u32 port_cap_flags;
- u32 pma_sample_start;
- u32 pma_sample_interval;
- __be16 pma_counter_select[5];
- u16 pma_tag;
- u16 qkey_violations;
- u16 mkey_violations;
- u16 mkey_lease_period;
- u16 pending_index; /* which pending queue is active */
- u8 pma_sample_status;
- u8 subnet_timeout;
- u8 vl_high_limit;
- struct ipath_opcode_stats opstats[128];
-};
-
-struct ipath_verbs_counters {
- u64 symbol_error_counter;
- u64 link_error_recovery_counter;
- u64 link_downed_counter;
- u64 port_rcv_errors;
- u64 port_rcv_remphys_errors;
- u64 port_xmit_discards;
- u64 port_xmit_data;
- u64 port_rcv_data;
- u64 port_xmit_packets;
- u64 port_rcv_packets;
- u32 local_link_integrity_errors;
- u32 excessive_buffer_overrun_errors;
- u32 vl15_dropped;
-};
-
-struct ipath_verbs_txreq {
- struct ipath_qp *qp;
- struct ipath_swqe *wqe;
- u32 map_len;
- u32 len;
- struct ipath_sge_state *ss;
- struct ipath_pio_header hdr;
- struct ipath_sdma_txreq txreq;
-};
-
-static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct ipath_mr, ibmr);
-}
-
-static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct ipath_pd, ibpd);
-}
-
-static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct ipath_ah, ibah);
-}
-
-static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct ipath_cq, ibcq);
-}
-
-static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct ipath_srq, ibsrq);
-}
-
-static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct ipath_qp, ibqp);
-}
-
-static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct ipath_ibdev, ibdev);
-}
-
-/*
- * This must be called with s_lock held.
- */
-static inline void ipath_schedule_send(struct ipath_qp *qp)
-{
- if (qp->s_flags & IPATH_S_ANY_WAIT)
- qp->s_flags &= ~IPATH_S_ANY_WAIT;
- if (!(qp->s_flags & IPATH_S_BUSY))
- tasklet_hi_schedule(&qp->s_task);
-}
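Because of the locking requirement noted above, callers outside the send path typically wrap it as below (ipath_kick_send is an illustrative helper, not part of the original header):

static inline void ipath_kick_send(struct ipath_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	ipath_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}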
-
-int ipath_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer <, ==, or > than zero.
- */
-static inline int ipath_cmp24(u32 a, u32 b)
-{
- return (((int) a) - ((int) b)) << 8;
-}
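PSNs are 24-bit and wrap, so the shift pushes bit 23 of the difference into the sign bit and ordinary signed comparisons then work across the wrap. A hypothetical use, checking whether a PSN falls inside an expected window (ipath_psn_in_window is an illustrative name):

static inline int ipath_psn_in_window(u32 psn, u32 first, u32 last)
{
	return ipath_cmp24(psn, first) >= 0 && ipath_cmp24(psn, last) <= 0;
}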
-
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait);
-
-int ipath_get_counters(struct ipath_devdata *dd,
- struct ipath_verbs_counters *cntrs);
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_mcast_tree_empty(void);
-
-__be32 ipath_compute_aeth(struct ipath_qp *qp);
-
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
-
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-
-int ipath_destroy_qp(struct ib_qp *ibqp);
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
-
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
-
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
-
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
-
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
- u32 hdrwords, struct ipath_sge_state *ss, u32 len);
-
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
-
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
-
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
-
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
- struct ipath_mregion *mr);
-
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
-
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
- struct ib_sge *sge, int acc);
-
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
- u32 len, u64 vaddr, u32 rkey, int acc);
-
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
-
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata);
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int ipath_destroy_srq(struct ib_srq *ibsrq);
-
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int ipath_destroy_cq(struct ib_cq *ibcq);
-
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata);
-
-int ipath_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
- int list_len, u64 iova);
-
-int ipath_unmap_fmr(struct list_head *fmr_list);
-
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
-
-void ipath_release_mmap_info(struct kref *ref);
-
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj);
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
- struct ipath_mmap_info *ip,
- u32 size, void *obj);
-
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-void ipath_insert_rnr_queue(struct ipath_qp *qp);
-
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
- u32 *lengthp, struct ipath_sge_state *ss);
-
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
-
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
- struct ipath_other_headers *ohdr,
- u32 bth0, u32 bth2);
-
-void ipath_do_send(unsigned long data);
-
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
- enum ib_wc_status status);
-
-int ipath_make_rc_req(struct ipath_qp *qp);
-
-int ipath_make_uc_req(struct ipath_qp *qp);
-
-int ipath_make_ud_req(struct ipath_qp *qp);
-
-int ipath_register_ib_device(struct ipath_devdata *);
-
-void ipath_unregister_ib_device(struct ipath_ibdev *);
-
-void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
-
-int ipath_ib_piobufavail(struct ipath_ibdev *);
-
-unsigned ipath_get_npkeys(struct ipath_devdata *);
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *);
-
-unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
-
-extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
-
-/*
- * Below converts HCA-specific LinkTrainingState to IB PhysPortState
- * values.
- */
-extern const u8 ipath_cvt_physportstate[];
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-
-extern const int ib_ipath_state_ops[];
-
-extern unsigned int ib_ipath_lkey_table_size;
-
-extern unsigned int ib_ipath_max_cqes;
-
-extern unsigned int ib_ipath_max_cqs;
-
-extern unsigned int ib_ipath_max_qp_wrs;
-
-extern unsigned int ib_ipath_max_qps;
-
-extern unsigned int ib_ipath_max_sges;
-
-extern unsigned int ib_ipath_max_mcast_grps;
-
-extern unsigned int ib_ipath_max_mcast_qp_attached;
-
-extern unsigned int ib_ipath_max_srqs;
-
-extern unsigned int ib_ipath_max_srq_sges;
-
-extern unsigned int ib_ipath_max_srq_wrs;
-
-extern const u32 ib_ipath_rnr_table[];
-
-extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
-
-#endif /* IPATH_VERBS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
deleted file mode 100644
index 72d476fa5b8f..000000000000
--- a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-#include <linux/slab.h>
-
-#include "ipath_verbs.h"
-
-/*
- * Global table of GID to attached QPs.
- * The table is global to all ipath devices since a send from one QP/device
- * needs to be locally routed to any locally attached QPs on the same
- * or different device.
- */
-static struct rb_root mcast_tree;
-static DEFINE_SPINLOCK(mcast_lock);
-
-/**
- * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
-{
- struct ipath_mcast_qp *mqp;
-
- mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
- if (!mqp)
- goto bail;
-
- mqp->qp = qp;
- atomic_inc(&qp->refcount);
-
-bail:
- return mqp;
-}
-
-static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
-{
- struct ipath_qp *qp = mqp->qp;
-
- /* Notify ipath_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-
- kfree(mqp);
-}
-
-/**
- * ipath_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
-{
- struct ipath_mcast *mcast;
-
- mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
- if (!mcast)
- goto bail;
-
- mcast->mgid = *mgid;
- INIT_LIST_HEAD(&mcast->qp_list);
- init_waitqueue_head(&mcast->wait);
- atomic_set(&mcast->refcount, 0);
- mcast->n_attached = 0;
-
-bail:
- return mcast;
-}
-
-static void ipath_mcast_free(struct ipath_mcast *mcast)
-{
- struct ipath_mcast_qp *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
- ipath_mcast_qp_free(p);
-
- kfree(mcast);
-}
-
-/**
- * ipath_mcast_find - search the global table for the given multicast GID
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
-{
- struct rb_node *n;
- unsigned long flags;
- struct ipath_mcast *mcast;
-
- spin_lock_irqsave(&mcast_lock, flags);
- n = mcast_tree.rb_node;
- while (n) {
- int ret;
-
- mcast = rb_entry(n, struct ipath_mcast, rb_node);
-
- ret = memcmp(mgid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else {
- atomic_inc(&mcast->refcount);
- spin_unlock_irqrestore(&mcast_lock, flags);
- goto bail;
- }
- }
- spin_unlock_irqrestore(&mcast_lock, flags);
-
- mcast = NULL;
-
-bail:
- return mcast;
-}
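A hypothetical caller, showing the reference-drop pattern the comment above requires; the wake_up keeps waiters such as the detach path (which waits for the refcount to fall to one) making progress (ipath_mcast_n_qps is an illustrative name):

static int ipath_mcast_n_qps(union ib_gid *mgid)
{
	struct ipath_mcast *mcast = ipath_mcast_find(mgid);
	int n;

	if (!mcast)
		return 0;
	n = mcast->n_attached;
	/* drop the reference ipath_mcast_find() took for us */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
	return n;
}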
-
-/**
- * ipath_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added. Return EEXIST if the GID was already in
- * the table but the QP was added. Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int ipath_mcast_add(struct ipath_ibdev *dev,
- struct ipath_mcast *mcast,
- struct ipath_mcast_qp *mqp)
-{
- struct rb_node **n = &mcast_tree.rb_node;
- struct rb_node *pn = NULL;
- int ret;
-
- spin_lock_irq(&mcast_lock);
-
- while (*n) {
- struct ipath_mcast *tmcast;
- struct ipath_mcast_qp *p;
-
- pn = *n;
- tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
-
- ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0) {
- n = &pn->rb_left;
- continue;
- }
- if (ret > 0) {
- n = &pn->rb_right;
- continue;
- }
-

- /* Search the QP list to see if this is already there. */
- list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
- if (p->qp == mqp->qp) {
- ret = ESRCH;
- goto bail;
- }
- }
- if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
- ret = ENOMEM;
- goto bail;
- }
-
- tmcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
- ret = EEXIST;
- goto bail;
- }
-
- spin_lock(&dev->n_mcast_grps_lock);
- if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
- spin_unlock(&dev->n_mcast_grps_lock);
- ret = ENOMEM;
- goto bail;
- }
-
- dev->n_mcast_grps_allocated++;
- spin_unlock(&dev->n_mcast_grps_lock);
-
- mcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
- atomic_inc(&mcast->refcount);
- rb_link_node(&mcast->rb_node, pn, n);
- rb_insert_color(&mcast->rb_node, &mcast_tree);
-
- ret = 0;
-
-bail:
- spin_unlock_irq(&mcast_lock);
-
- return ret;
-}
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct ipath_qp *qp = to_iqp(ibqp);
- struct ipath_ibdev *dev = to_idev(ibqp->device);
- struct ipath_mcast *mcast;
- struct ipath_mcast_qp *mqp;
- int ret;
-
- /*
- * Allocate data structures since it's better to do this outside of
- * spin locks and it will most likely be needed.
- */
- mcast = ipath_mcast_alloc(gid);
- if (mcast == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
- mqp = ipath_mcast_qp_alloc(qp);
- if (mqp == NULL) {
- ipath_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- }
- switch (ipath_mcast_add(dev, mcast, mqp)) {
- case ESRCH:
- /* Neither was used: can't attach the same QP twice. */
- ipath_mcast_qp_free(mqp);
- ipath_mcast_free(mcast);
- ret = -EINVAL;
- goto bail;
- case EEXIST: /* The mcast wasn't used */
- ipath_mcast_free(mcast);
- break;
- case ENOMEM:
- /* Exceeded the maximum number of mcast groups. */
- ipath_mcast_qp_free(mqp);
- ipath_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- default:
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct ipath_qp *qp = to_iqp(ibqp);
- struct ipath_ibdev *dev = to_idev(ibqp->device);
- struct ipath_mcast *mcast = NULL;
- struct ipath_mcast_qp *p, *tmp;
- struct rb_node *n;
- int last = 0;
- int ret;
-
- spin_lock_irq(&mcast_lock);
-
- /* Find the GID in the mcast table. */
- n = mcast_tree.rb_node;
- while (1) {
- if (n == NULL) {
- spin_unlock_irq(&mcast_lock);
- ret = -EINVAL;
- goto bail;
- }
-
- mcast = rb_entry(n, struct ipath_mcast, rb_node);
- ret = memcmp(gid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else
- break;
- }
-
- /* Search the QP list. */
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
- if (p->qp != qp)
- continue;
- /*
- * We found it, so remove it, but don't poison the forward
- * link until we are sure there are no list walkers.
- */
- list_del_rcu(&p->list);
- mcast->n_attached--;
-
- /* If this was the last attached QP, remove the GID too. */
- if (list_empty(&mcast->qp_list)) {
- rb_erase(&mcast->rb_node, &mcast_tree);
- last = 1;
- }
- break;
- }
-
- spin_unlock_irq(&mcast_lock);
-
- if (p) {
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- ipath_mcast_qp_free(p);
- }
- if (last) {
- atomic_dec(&mcast->refcount);
- wait_event(mcast->wait, !atomic_read(&mcast->refcount));
- ipath_mcast_free(mcast);
- spin_lock_irq(&dev->n_mcast_grps_lock);
- dev->n_mcast_grps_allocated--;
- spin_unlock_irq(&dev->n_mcast_grps_lock);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int ipath_mcast_tree_empty(void)
-{
- return mcast_tree.rb_node == NULL;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
deleted file mode 100644
index 1a7e20a75149..000000000000
--- a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
- return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
deleted file mode 100644
index 7b6e4c843e19..000000000000
--- a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/processor.h>
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
- int ret = 0;
- u64 pioaddr, piolen;
- unsigned bits;
- const unsigned long addr = pci_resource_start(dd->pcidev, 0);
- const size_t len = pci_resource_len(dd->pcidev, 0);
-
- /*
- * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
- * chip. Linux (possibly the hardware) requires it to be on a power
- * of 2 address matching the length (which has to be a power of 2).
- * For rev1, that means the base address, for rev2, it will be just
- * the PIO buffers themselves.
- * For chips with two sets of buffers, the calculations are
- * somewhat more complicated; we need to sum, and the piobufbase
- * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
- * The buffers are still packed, so a single range covers both.
- */
- if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
- unsigned long pio2kbase, pio4kbase;
- pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
- pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
- if (pio2kbase < pio4kbase) { /* all, for now */
- pioaddr = addr + pio2kbase;
- piolen = pio4kbase - pio2kbase +
- dd->ipath_piobcnt4k * dd->ipath_4kalign;
- } else {
- pioaddr = addr + pio4kbase;
- piolen = pio2kbase - pio4kbase +
- dd->ipath_piobcnt2k * dd->ipath_palign;
- }
- } else { /* single buffer size (2K, currently) */
- pioaddr = addr + dd->ipath_piobufbase;
- piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
- dd->ipath_piobcnt4k * dd->ipath_4kalign;
- }
-
- for (bits = 0; !(piolen & (1ULL << bits)); bits++)
- /* do nothing */ ;
-
- if (piolen != (1ULL << bits)) {
- piolen >>= bits;
- while (piolen >>= 1)
- bits++;
- piolen = 1ULL << (bits + 1);
- }
- if (pioaddr & (piolen - 1)) {
- u64 atmp;
- ipath_dbg("pioaddr %llx not on right boundary for size "
- "%llx, fixing\n",
- (unsigned long long) pioaddr,
- (unsigned long long) piolen);
- atmp = pioaddr & ~(piolen - 1);
- if (atmp < addr || (atmp + piolen) > (addr + len)) {
- ipath_dev_err(dd, "No way to align address/size "
- "(%llx/%llx), no WC mtrr\n",
- (unsigned long long) atmp,
- (unsigned long long) piolen << 1);
- ret = -ENODEV;
- } else {
- ipath_dbg("changing WC base from %llx to %llx, "
- "len from %llx to %llx\n",
- (unsigned long long) pioaddr,
- (unsigned long long) atmp,
- (unsigned long long) piolen,
- (unsigned long long) piolen << 1);
- pioaddr = atmp;
- piolen <<= 1;
- }
- }
-
- if (!ret) {
- dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
- if (dd->wc_cookie < 0) {
- ipath_dev_err(dd, "Seting mtrr failed on PIO buffers\n");
- ret = -ENODEV;
- } else if (dd->wc_cookie == 0)
- ipath_cdbg(VERBOSE, "Set mtrr for chip to WC not needed\n");
- else
- ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n");
- }
-
- return ret;
-}
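The bit-twiddling in ipath_enable_wc() above amounts to rounding the PIO length up to the next power of two before aligning the base address. Stated on its own as an equivalent sketch (wc_pow2_len is an illustrative name):

static u64 wc_pow2_len(u64 len)
{
	u64 p = 1;

	while (p < len)
		p <<= 1;
	return p;	/* smallest power of two >= len */
}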
-
-/**
- * ipath_disable_wc - disable write combining for MMIO writes to the device
- * @dd: infinipath device
- */
-void ipath_disable_wc(struct ipath_devdata *dd)
-{
- arch_phys_wc_del(dd->wc_cookie);
-}
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index efd6f4560d3e..7e8037e230b8 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -1,7 +1,7 @@
menu "Speakup console speech"
config SPEAKUP
- depends on VT
+ depends on VT && !MN10300
tristate "Speakup core"
---help---
This is the Speakup screen reader. Think of it as a
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 63c59bc89b04..30cf973f326d 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -264,8 +264,9 @@ static struct notifier_block vt_notifier_block = {
.notifier_call = vt_notifier_call,
};
-static unsigned char get_attributes(u16 *pos)
+static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
{
+ pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
return (u_char) (scr_readw(pos) >> 8);
}
@@ -275,7 +276,7 @@ static void speakup_date(struct vc_data *vc)
spk_y = spk_cy = vc->vc_y;
spk_pos = spk_cp = vc->vc_pos;
spk_old_attr = spk_attr;
- spk_attr = get_attributes((u_short *) spk_pos);
+ spk_attr = get_attributes(vc, (u_short *)spk_pos);
}
static void bleep(u_short val)
@@ -469,8 +470,12 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
u16 ch = ' ';
if (vc && pos) {
- u16 w = scr_readw(pos);
- u16 c = w & 0xff;
+ u16 w;
+ u16 c;
+
+ pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
+ w = scr_readw(pos);
+ c = w & 0xff;
if (w & vc->vc_hi_font_mask)
c |= 0x100;
@@ -746,7 +751,7 @@ static int get_line(struct vc_data *vc)
u_char tmp2;
spk_old_attr = spk_attr;
- spk_attr = get_attributes((u_short *) spk_pos);
+ spk_attr = get_attributes(vc, (u_short *)spk_pos);
for (i = 0; i < vc->vc_cols; i++) {
buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
tmp += 2;
@@ -811,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
u_short saved_punc_mask = spk_punc_mask;
spk_old_attr = spk_attr;
- spk_attr = get_attributes((u_short *) from);
+ spk_attr = get_attributes(vc, (u_short *)from);
while (from < to) {
buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
from += 2;
@@ -886,7 +891,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
sentmarks[bn][0] = &sentbuf[bn][0];
i = 0;
spk_old_attr = spk_attr;
- spk_attr = get_attributes((u_short *) start);
+ spk_attr = get_attributes(vc, (u_short *)start);
while (start < end) {
sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
@@ -1585,7 +1590,7 @@ static int count_highlight_color(struct vc_data *vc)
u16 *ptr;
for (ptr = start; ptr < end; ptr++) {
- ch = get_attributes(ptr);
+ ch = get_attributes(vc, ptr);
bg = (ch & 0x70) >> 4;
speakup_console[vc_num]->ht.bgcount[bg]++;
}
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index aa5ab6c80ed4..41ef099b7aa6 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
- ld = tty_ldisc_ref_wait(tty);
+ ld = tty_ldisc_ref(tty);
+ if (!ld)
+ goto tty_unref;
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
@@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
+tty_unref:
tty_kref_put(tty);
}
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 3b5835b28128..a5bbb338f275 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -6,6 +6,11 @@
#include "spk_priv.h"
#include "serialio.h"
+#include <linux/serial_core.h>
+/* WARNING: Do not change this to <linux/serial.h> without testing that
+ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+#include <asm/serial.h>
+
#ifndef SERIAL_PORT_DFNS
#define SERIAL_PORT_DFNS
#endif
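The empty SERIAL_PORT_DFNS fallback matters because rs_table in this file is presumably built by expanding that macro into an array initializer, roughly as below, which is why the ARRAY_SIZE() bounds check added in the next hunk can legitimately see a zero-length table:

static const struct old_serial_port rs_table[] = {
	SERIAL_PORT_DFNS	/* may expand to nothing on some architectures */
};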
@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
int baud = 9600, quot = 0;
unsigned int cval = 0;
int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
- const struct old_serial_port *ser = rs_table + index;
+ const struct old_serial_port *ser;
int err;
+ if (index >= ARRAY_SIZE(rs_table)) {
+ pr_info("no port info for ttyS%d\n", index);
+ return NULL;
+ }
+ ser = rs_table + index;
+
/* Divisor, bytesize and parity */
quot = ser->baud_base / baud;
cval = cflag & (CSIZE | CSTOPB);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d9a5fc28fef4..b280abaad91b 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -269,16 +269,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
static void n_tty_check_unthrottle(struct tty_struct *tty)
{
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
return;
if (!tty->count)
return;
n_tty_kick_worker(tty);
- n_tty_write_wakeup(tty->link);
- if (waitqueue_active(&tty->link->write_wait))
- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+ tty_wakeup(tty->link);
return;
}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4097f3f65b3b..e71ec78fc11e 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1379,6 +1379,9 @@ ce4100_serial_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
+#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
+
#define BYT_PRV_CLK 0x800
#define BYT_PRV_CLK_EN (1 << 0)
#define BYT_PRV_CLK_M_VAL_SHIFT 1
@@ -1461,11 +1464,13 @@ byt_serial_setup(struct serial_private *priv,
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_BYT_UART1:
case PCI_DEVICE_ID_INTEL_BSW_UART1:
+ case PCI_DEVICE_ID_INTEL_BDW_UART1:
rx_param->src_id = 3;
tx_param->dst_id = 2;
break;
case PCI_DEVICE_ID_INTEL_BYT_UART2:
case PCI_DEVICE_ID_INTEL_BSW_UART2:
+ case PCI_DEVICE_ID_INTEL_BDW_UART2:
rx_param->src_id = 5;
tx_param->dst_id = 4;
break;
@@ -2062,6 +2067,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = byt_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
/*
* ITE
*/
@@ -5506,6 +5525,16 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
pbn_byt },
+ /* Intel Broadwell */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+
/*
* Intel Quark x1000
*/
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 892c92354745..5cec01c75691 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1463,13 +1463,13 @@ static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
- if (!tty->count)
- return -EIO;
-
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
+ if (!tty->count)
+ return -EAGAIN;
+
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
return -EBUSY;
@@ -2065,7 +2065,12 @@ retry_open:
if (tty) {
mutex_unlock(&tty_mutex);
- tty_lock(tty);
+ retval = tty_lock_interruptible(tty);
+ if (retval) {
+ if (retval == -EINTR)
+ retval = -ERESTARTSYS;
+ goto err_unref;
+ }
/* safe to drop the kref from tty_driver_lookup_tty() */
tty_kref_put(tty);
retval = tty_reopen(tty);
@@ -2083,7 +2088,11 @@ retry_open:
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
- goto err_file;
+ if (retval != -EAGAIN || signal_pending(current))
+ goto err_file;
+ tty_free_file(filp);
+ schedule();
+ goto retry_open;
}
tty_add_file(tty, filp);
@@ -2152,6 +2161,7 @@ retry_open:
return 0;
err_unlock:
mutex_unlock(&tty_mutex);
+err_unref:
/* after locks to avoid deadlock */
if (!IS_ERR_OR_NULL(driver))
tty_driver_kref_put(driver);
@@ -2649,6 +2659,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
}
/**
+ * tiocgetd - get line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Retrieves the line discipline id directly from the ldisc.
+ *
+ * Locking: waits for ldisc reference (in case the line discipline
+ * is changing or the tty is being hungup)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+ struct tty_ldisc *ld;
+ int ret;
+
+ ld = tty_ldisc_ref_wait(tty);
+ ret = put_user(ld->ops->num, p);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+/**
* send_break - performed time break
* @tty: device to break on
* @duration: timeout in mS
@@ -2874,7 +2906,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCGSID:
return tiocgsid(tty, real_tty, p);
case TIOCGETD:
- return put_user(tty->ldisc->ops->num, (int __user *)p);
+ return tiocgetd(tty, p);
case TIOCSETD:
return tiocsetd(tty, p);
case TIOCVHANGUP:
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 77703a391207..d2f3c4cd697f 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -19,6 +19,14 @@ void __lockfunc tty_lock(struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_lock);
+int tty_lock_interruptible(struct tty_struct *tty)
+{
+ if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
+ return -EIO;
+ tty_kref_get(tty);
+ return mutex_lock_interruptible(&tty->legacy_mutex);
+}
+
void __lockfunc tty_unlock(struct tty_struct *tty)
{
if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e7cbc44eef57..bd51bdd0a7bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4250,6 +4250,7 @@ unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
{
return screenpos(vc, 2 * w_offset, viewed);
}
+EXPORT_SYMBOL_GPL(screen_pos);
void getconsxy(struct vc_data *vc, unsigned char *p)
{
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 26ca4f910cb0..fa4e23930614 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -428,7 +428,8 @@ static void acm_read_bulk_callback(struct urb *urb)
set_bit(rb->index, &acm->read_urbs_free);
dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
__func__, status);
- return;
+ if ((status != -ENOENT) || (urb->actual_length == 0))
+ return;
}
usb_mark_last_busy(acm->dev);
@@ -1404,6 +1405,8 @@ made_compressed_probe:
usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ if (quirks & SEND_ZERO_PACKET)
+ snd->urb->transfer_flags |= URB_ZERO_PACKET;
snd->instance = acm;
}
@@ -1838,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
},
#endif
+ /* Samsung phone in firmware update mode */
+ { USB_DEVICE(0x04e8, 0x685d),
+ .driver_info = IGNORE_DEVICE,
+ },
+
/* Exclude Infineon Flash Loader utility */
{ USB_DEVICE(0x058b, 0x0041),
.driver_info = IGNORE_DEVICE,
@@ -1861,6 +1869,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
+ { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
{ }
};
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index dd9af38e7cda..ccfaba9ab4e4 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -134,3 +134,4 @@ struct acm {
#define IGNORE_DEVICE BIT(5)
#define QUIRK_CONTROL_LINE_STATE BIT(6)
#define CLEAR_HALT_CONDITIONS BIT(7)
+#define SEND_ZERO_PACKET BIT(8)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 51b436918f78..350dcd9af5d8 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,7 +5401,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
bos = udev->bos;
- udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
@@ -5494,8 +5493,11 @@ done:
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
- usb_release_bos_descriptor(udev);
- udev->bos = bos;
+ /* release the new BOS descriptor allocated by hub_port_init() */
+ if (udev->bos != bos) {
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
+ }
return 0;
re_enumerate:
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 39a0fa8a4c0a..e991d55914db 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -572,12 +572,6 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
- /*
- * If the force mode bit is already set, don't set it.
- */
- if ((gusbcfg & set) && !(gusbcfg & clear))
- return false;
-
gusbcfg &= ~clear;
gusbcfg |= set;
dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
@@ -3278,9 +3272,6 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
/**
* During device initialization, read various hardware configuration
* registers and interpret the contents.
- *
- * This should be called during driver probe. It will perform a core
- * soft reset in order to get the reset values of the parameters.
*/
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
@@ -3288,7 +3279,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
unsigned width;
u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
u32 grxfsiz;
- int retval;
/*
* Attempt to ensure this device is really a DWC_otg Controller.
@@ -3308,10 +3298,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
- retval = dwc2_core_reset(hsotg);
- if (retval)
- return retval;
-
hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 510f787434b3..690b9fd98b55 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -530,7 +530,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
return retval;
- /* Reset the controller and detect hardware config values */
+ /*
+ * Reset before dwc2_get_hwparams() so that it reads the power-on reset
+ * values from the registers.
+ */
+ dwc2_core_reset_and_force_dr_mode(hsotg);
+
+ /* Detect config values from hardware */
retval = dwc2_get_hwparams(hsotg);
if (retval)
goto error;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index af023a81a0b0..7d1dd82a95ac 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2789,6 +2789,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
+ dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
/*
* FIXME We might be setting max_speed to <SUPER, however versions
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 04ce6b156b35..e0244fb3903d 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -112,12 +112,16 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id)
offset = start;
if (!start || start == XHCI_HCC_PARAMS_OFFSET) {
val = readl(base + XHCI_HCC_PARAMS_OFFSET);
+ if (val == ~0)
+ return 0;
offset = XHCI_HCC_EXT_CAPS(val) << 2;
if (!offset)
return 0;
};
do {
val = readl(base + offset);
+ if (val == ~0)
+ return 0;
if (XHCI_EXT_CAPS_ID(val) == id && offset != start)
return offset;
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index c30de7c39f44..73f763c4f5f5 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -275,8 +275,9 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
return false;
/*
- * for LS & FS periodic endpoints which its device don't attach
- * to TT are also ignored, root-hub will schedule them directly
+ * LS & FS periodic endpoints whose device is not behind a TT are also
+ * ignored; the root hub will schedule them directly, but the @bpkts
+ * field of the endpoint context needs to be set to 1.
*/
if (is_fs_or_ls(speed) && !has_tt)
return false;
@@ -339,8 +340,17 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
usb_endpoint_dir_in(&ep->desc), ep);
- if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
+ if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
+ /*
+ * set @bpkts to 1 if it is an LS or FS periodic endpoint whose
+ * device is not connected through an external HS hub
+ */
+ if (usb_endpoint_xfer_int(&ep->desc)
+ || usb_endpoint_xfer_isoc(&ep->desc))
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
+
return 0;
+ }
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index c9ab6a44c34a..9532f5aef71b 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -696,9 +696,24 @@ static int xhci_mtk_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM_SLEEP
+/*
+ * If IP sleep fails while all clocks are disabled, register access will
+ * hang the AHB bus, so stop polling the roothubs to avoid register access
+ * during bus suspend. There is no need to check whether IP sleep failed:
+ * if it did, SPM will wake the system up immediately after system suspend
+ * completes, which is exactly what we want.
+ */
static int xhci_mtk_suspend(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = mtk->hcd;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ xhci_dbg(xhci, "%s: stop port polling\n", __func__);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
xhci_mtk_host_disable(mtk);
xhci_mtk_phy_power_off(mtk);
@@ -710,11 +725,19 @@ static int xhci_mtk_suspend(struct device *dev)
static int xhci_mtk_resume(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = mtk->hcd;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
usb_wakeup_disable(mtk);
xhci_mtk_clks_enable(mtk);
xhci_mtk_phy_power_on(mtk);
xhci_mtk_host_enable(mtk);
+
+ xhci_dbg(xhci, "%s: restart port polling\n", __func__);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
+ set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ usb_hcd_poll_rh_status(xhci->shared_hcd);
return 0;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 58c43ed7ff3b..f0640b7a1c42 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -28,7 +28,9 @@
#include "xhci.h"
#include "xhci-trace.h"
-#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define SSIC_PORT_NUM 2
+#define SSIC_PORT_CFG2 0x880c
+#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
@@ -45,6 +47,7 @@
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
static const char hcd_name[] = "xhci_hcd";
@@ -151,9 +154,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+ xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -312,22 +320,20 @@ static void xhci_pci_remove(struct pci_dev *dev)
 * The SSIC port needs to be marked as "unused" before putting xHCI
 * into D3. After D3 exit, the SSIC port needs to be marked as "used".
* Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
*/
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 val;
void __iomem *reg;
+ int i;
- if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
- reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+ for (i = 0; i < SSIC_PORT_NUM; i++) {
+ reg = (void __iomem *) xhci->cap_regs +
+ SSIC_PORT_CFG2 +
+ i * SSIC_PORT_CFG2_OFFSET;
- /* Notify SSIC that SSIC profile programming is not done */
+ /* Notify SSIC that SSIC profile programming is not done. */
val = readl(reg) & ~PROG_DONE;
writel(val, reg);
@@ -344,6 +350,17 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
writel(val, reg);
readl(reg);
}
+}
+
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ void __iomem *reg;
+ u32 val;
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
val = readl(reg);
@@ -355,6 +372,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int ret;
/*
* Systems with the TI redriver that loses port status change events
@@ -364,9 +382,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
pdev->no_d3cold = true;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(hcd, true);
+ xhci_pme_quirk(hcd);
+
+ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+ xhci_ssic_port_unused_quirk(hcd, true);
- return xhci_suspend(xhci, do_wakeup);
+ ret = xhci_suspend(xhci, do_wakeup);
+ if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
+ xhci_ssic_port_unused_quirk(hcd, false);
+
+ return ret;
}
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
@@ -396,8 +421,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
usb_enable_intel_xhci_ports(pdev);
+ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+ xhci_ssic_port_unused_quirk(hcd, false);
+
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_quirk(hcd, false);
+ xhci_pme_quirk(hcd);
retval = xhci_resume(xhci, hibernated);
return retval;
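For reference, the reworked quirk walks both SSIC ports using the new defines: the port register blocks start at SSIC_PORT_CFG2 (0x880c) and are spaced SSIC_PORT_CFG2_OFFSET (0x30) apart, so the second port's CFG2 register sits at 0x880c + 0x30 = 0x883c, which is exactly the old single-port PORT2_SSIC_CONFIG_REG2 value this patch removes.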
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 770b6b088797..d39d6bf1d090 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -184,7 +184,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
- *priv = *priv_match;
+ if (priv_match)
+ *priv = *priv_match;
}
if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f1c21c40b4a6..3915657e6078 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2193,10 +2193,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
/* Fast path - was this the last TRB in the TD for this URB? */
} else if (event_trb == td->last_trb) {
- if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
- return finish_td(xhci, td, event_trb, event, ep,
- status, false);
-
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
@@ -2248,12 +2244,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
- if (trb_comp_code == COMP_SHORT_TX) {
- xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
- td->urb_length_set = true;
- return 0;
- }
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 26a44c0e969e..0c8087d3c313 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1554,7 +1554,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HW died, freeing TD.");
urb_priv = urb->hcpriv;
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ for (i = urb_priv->td_cnt;
+ i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+ i++) {
td = urb_priv->td[i];
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9be7348872ba..cc651383ce5a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1631,6 +1631,7 @@ struct xhci_hcd {
#define XHCI_BROKEN_STREAMS (1 << 19)
#define XHCI_PME_STUCK_QUIRK (1 << 20)
#define XHCI_MTK_HOST (1 << 21)
+#define XHCI_SSIC_PORT_UNUSED (1 << 22)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index b2685e75a683..3eaa4ba6867d 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -348,7 +348,9 @@ static int ux500_suspend(struct device *dev)
struct ux500_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- usb_phy_set_suspend(musb->xceiv, 1);
+ if (musb)
+ usb_phy_set_suspend(musb->xceiv, 1);
+
clk_disable_unprepare(glue->clk);
return 0;
@@ -366,7 +368,8 @@ static int ux500_resume(struct device *dev)
return ret;
}
- usb_phy_set_suspend(musb->xceiv, 0);
+ if (musb)
+ usb_phy_set_suspend(musb->xceiv, 0);
return 0;
}
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 0d19a6d61a71..970a30e155cb 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1599,6 +1599,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
&motg->id.nb);
if (ret < 0) {
dev_err(&pdev->dev, "register ID notifier failed\n");
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
return ret;
}
@@ -1660,15 +1662,6 @@ static int msm_otg_probe(struct platform_device *pdev)
if (!motg)
return -ENOMEM;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!motg->phy.otg)
@@ -1710,6 +1703,15 @@ static int msm_otg_probe(struct platform_device *pdev)
if (!motg->regs)
return -ENOMEM;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ if (!np)
+ return -ENXIO;
+ ret = msm_otg_read_dt(pdev, motg);
+ if (ret)
+ return ret;
+ }
+
/*
* NOTE: The PHYs can be multiplexed between the chipidea controller
* and the dwc3 controller, using a single bit. It is important that
@@ -1717,8 +1719,10 @@ static int msm_otg_probe(struct platform_device *pdev)
*/
if (motg->phy_number) {
phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
- if (!phy_select)
- return -ENOMEM;
+ if (!phy_select) {
+ ret = -ENOMEM;
+ goto unregister_extcon;
+ }
/* Enable second PHY with the OTG port */
writel(0x1, phy_select);
}
@@ -1728,7 +1732,8 @@ static int msm_otg_probe(struct platform_device *pdev)
motg->irq = platform_get_irq(pdev, 0);
if (motg->irq < 0) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
- return motg->irq;
+ ret = motg->irq;
+ goto unregister_extcon;
}
regs[0].supply = "vddcx";
@@ -1737,7 +1742,7 @@ static int msm_otg_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
if (ret)
- return ret;
+ goto unregister_extcon;
motg->vddcx = regs[0].consumer;
motg->v3p3 = regs[1].consumer;
@@ -1834,6 +1839,12 @@ disable_clks:
clk_disable_unprepare(motg->clk);
if (!IS_ERR(motg->core_clk))
clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+ extcon_unregister_notifier(motg->id.extcon,
+ EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
+
return ret;
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index c2936dc48ca7..00bfea01be65 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -220,7 +220,7 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
/* Return true if the vbus is there */
static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
{
- unsigned int vbus_value;
+ unsigned int vbus_value = 0;
if (!mxs_phy->regmap_anatop)
return false;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 9b90ad747d87..987813b8a7f9 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a5a0376bbd48..8c660ae401d8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
+ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
/* Papouch devices based on FTDI chip */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 67c6d4469730..a84df2513994 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -615,6 +615,7 @@
*/
#define RATOC_VENDOR_ID 0x0584
#define RATOC_PRODUCT_ID_USB60F 0xb020
+#define RATOC_PRODUCT_ID_SCU18 0xb03a
/*
* Infineon Technologies
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
index e3c3f57c2d82..619607323bfd 100644
--- a/drivers/usb/serial/mxu11x0.c
+++ b/drivers/usb/serial/mxu11x0.c
@@ -368,6 +368,16 @@ static int mxu1_port_probe(struct usb_serial_port *port)
return 0;
}
+static int mxu1_port_remove(struct usb_serial_port *port)
+{
+ struct mxu1_port *mxport;
+
+ mxport = usb_get_serial_port_data(port);
+ kfree(mxport);
+
+ return 0;
+}
+
static int mxu1_startup(struct usb_serial *serial)
{
struct mxu1_device *mxdev;
@@ -427,6 +437,14 @@ err_free_mxdev:
return err;
}
+static void mxu1_release(struct usb_serial *serial)
+{
+ struct mxu1_device *mxdev;
+
+ mxdev = usb_get_serial_data(serial);
+ kfree(mxdev);
+}
+
static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
u8 mask, u8 byte)
{
@@ -957,7 +975,9 @@ static struct usb_serial_driver mxu11x0_device = {
.id_table = mxu1_idtable,
.num_ports = 1,
.port_probe = mxu1_port_probe,
+ .port_remove = mxu1_port_remove,
.attach = mxu1_startup,
+ .release = mxu1_release,
.open = mxu1_open,
.close = mxu1_close,
.ioctl = mxu1_ioctl,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f2280606b73c..db86e512e0fc 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
+#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
@@ -615,6 +617,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+ .sendsetup = BIT(2),
+ .reserved = BIT(0) | BIT(1) | BIT(3),
+};
+
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(2) | BIT(3),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1160,6 +1172,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1679,7 +1695,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 60afb39eb73c..337a0be89fcf 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
(serial->num_interrupt_in == 0))
return 0;
+ if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
/*
* It appears that Treos and Kyoceras want to use the
* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
*/
/* some sanity check */
- if (serial->num_ports < 2)
- return -1;
+ if (serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
+ return -ENODEV;
+ }
/* port 0 now uses the modified endpoint Address */
port = serial->port[0];
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 82f25cc1c460..ecca316386f5 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -123,8 +123,8 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
/*
* With noiommu enabled, an IOMMU group will be created for a device
* that doesn't already have one and doesn't have an iommu_ops on their
- * bus. We use iommu_present() again in the main code to detect these
- * fake groups.
+ * bus. We set iommudata simply to be able to identify these groups
+ * as special use and for reclamation later.
*/
if (group || !noiommu || iommu_present(dev->bus))
return group;
@@ -134,6 +134,7 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
return NULL;
iommu_group_set_name(group, "vfio-noiommu");
+ iommu_group_set_iommudata(group, &noiommu, NULL);
ret = iommu_group_add_device(group, dev);
iommu_group_put(group);
if (ret)
@@ -158,7 +159,7 @@ EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
- if (!iommu_present(dev->bus))
+ if (iommu_group_get_iommudata(group) == &noiommu)
iommu_group_remove_device(dev);
#endif
@@ -190,16 +191,10 @@ static long vfio_noiommu_ioctl(void *iommu_data,
return -ENOTTY;
}
-static int vfio_iommu_present(struct device *dev, void *unused)
-{
- return iommu_present(dev->bus) ? 1 : 0;
-}
-
static int vfio_noiommu_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
- return iommu_group_for_each_dev(iommu_group, NULL,
- vfio_iommu_present) ? -EINVAL : 0;
+ return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}
static void vfio_noiommu_detach_group(void *iommu_data,
@@ -323,8 +318,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
/**
* Group objects - create, release, get, put, search
*/
-static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
- bool iommu_present)
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
struct vfio_group *group, *tmp;
struct device *dev;
@@ -342,7 +336,9 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
atomic_set(&group->container_users, 0);
atomic_set(&group->opened, 0);
group->iommu_group = iommu_group;
- group->noiommu = !iommu_present;
+#ifdef CONFIG_VFIO_NOIOMMU
+ group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
+#endif
group->nb.notifier_call = vfio_iommu_group_notifier;
@@ -767,7 +763,7 @@ int vfio_add_group_dev(struct device *dev,
group = vfio_group_get_from_iommu(iommu_group);
if (!group) {
- group = vfio_create_group(iommu_group, iommu_present(dev->bus));
+ group = vfio_create_group(iommu_group);
if (IS_ERR(group)) {
iommu_group_put(iommu_group);
return PTR_ERR(group);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 36205c27c4d0..f6bed86c17f9 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -545,6 +545,7 @@ err_enable_device:
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct device *dev = get_device(&vp_dev->vdev.dev);
unregister_virtio_device(&vp_dev->vdev);
@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
virtio_pci_modern_remove(vp_dev);
pci_disable_device(pci_dev);
+ put_device(dev);
}
static struct pci_driver virtio_pci_driver = {
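The added get_device()/put_device() pair keeps the struct device pinned across unregister_virtio_device(), so its release callback cannot run while the modern/legacy teardown and pci_disable_device() still touch vp_dev. A minimal sketch of the same pattern with hypothetical names (my_obj, my_unregister, my_free_resources):

    #include <linux/device.h>

    static void my_remove(struct my_obj *obj)
    {
            struct device *dev = get_device(&obj->dev);    /* pin */

            my_unregister(obj);        /* may drop the last public reference */
            my_free_resources(obj);    /* still safe: we hold dev */

            put_device(dev);           /* release callback may run now */
    }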
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4f0e7be0da34..0f6d8515ba4f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -145,7 +145,8 @@ config MENF21BMC_WATCHDOG
config TANGOX_WATCHDOG
tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
select WATCHDOG_CORE
- depends on ARCH_TANGOX || COMPILE_TEST
+ depends on ARCH_TANGO || COMPILE_TEST
+ depends on HAS_IOMEM
help
Support for the watchdog in Sigma Designs SMP86xx (tango3)
and SMP87xx (tango4) family chips.
@@ -618,6 +619,7 @@ config DIGICOLOR_WATCHDOG
config LPC18XX_WATCHDOG
tristate "LPC18xx/43xx Watchdog"
depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
select WATCHDOG_CORE
help
 Say Y here to include support for the watchdog timer
@@ -1374,6 +1376,7 @@ config BCM_KONA_WDT_DEBUG
config BCM7038_WDT
tristate "BCM7038 Watchdog"
select WATCHDOG_CORE
+ depends on HAS_IOMEM
help
Watchdog driver for the built-in hardware in Broadcom 7038 SoCs.
@@ -1383,6 +1386,7 @@ config IMGPDC_WDT
tristate "Imagination Technologies PDC Watchdog Timer"
depends on HAS_IOMEM
depends on METAG || MIPS || COMPILE_TEST
+ select WATCHDOG_CORE
help
Driver for Imagination Technologies PowerDown Controller
Watchdog Timer.
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index f36ca4be0720..ac5840d9689a 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -292,4 +292,4 @@ MODULE_PARM_DESC(nodelay,
"Force selection of a timeout setting without initial delay "
"(max6373/74 only, default=0)");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 1a11aedc4fe8..68952d9ccf83 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -608,7 +608,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
struct usb_pcwd_private *usb_pcwd = NULL;
- int pipe, maxp;
+ int pipe;
int retval = -ENOMEM;
int got_fw_rev;
unsigned char fw_rev_major, fw_rev_minor;
@@ -641,7 +641,6 @@ static int usb_pcwd_probe(struct usb_interface *interface,
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
- maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
/* allocate memory for our device and initialize it */
usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 01d816251302..e7a715e82021 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -139,12 +139,11 @@ static int wdt_config(struct watchdog_device *wdd, bool ping)
writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+ writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
- if (!ping) {
- writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+ if (!ping)
writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
WDTCONTROL);
- }
writel_relaxed(LOCK, wdt->base + WDTLOCK);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 945fc4327201..4ac2ca8a7656 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -242,7 +242,7 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
-static struct cleancache_ops tmem_cleancache_ops = {
+static const struct cleancache_ops tmem_cleancache_ops = {
.put_page = tmem_cleancache_put_page,
.get_page = tmem_cleancache_get_page,
.invalidate_page = tmem_cleancache_flush_page,