path: root/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"


static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}
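/*
 * For example, ARM_SMMU_GR0_sCR0 (offset 0x0) is redirected to the
 * non-secure alias at offset 0x400 (SMMU_NSCR0), assuming the usual
 * SMMUv1/v2 layout in which the non-secure banked copies of the global
 * registers live 0x400 above their secure counterparts.
 */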

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care about sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};
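/*
 * A minimal sketch of how these hooks are consumed, assuming the register
 * accessor helpers in arm-smmu.h dispatch through the impl when one is
 * installed, roughly like:
 *
 *	static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu,
 *					 int page, int offset)
 *	{
 *		if (smmu->impl && unlikely(smmu->impl->read_reg))
 *			return smmu->impl->read_reg(smmu, page, offset);
 *		return readl_relaxed(arm_smmu_page(smmu, page) + offset);
 *	}
 */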


struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}
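/*
 * Worked example (hypothetical system): with two SMMUs of 128 context
 * banks each, the first instance to probe gets id_base 0 and the second
 * gets id_base 128, so ASID/VMID n on the second SMMU becomes 128 + n
 * globally and cannot collide with any ID handed out by the first.
 */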

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu = *smmu;
	cs->smmu.impl = &cavium_impl;

	devm_kfree(smmu->dev, smmu);

	return &cs->smmu;
}
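/*
 * The pattern above "subclasses" arm_smmu_device: a larger wrapper struct
 * embedding it is allocated, the original is copied in and freed, and
 * callbacks such as cavium_cfg_probe() later recover the wrapper with
 * container_of().
 */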


#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};

static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split each readq into two readl accesses.
	 */
	return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
			       u64 val)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split each writeq into two writel accesses.
	 */
	hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}
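/*
 * hi_lo_readq_relaxed()/hi_lo_writeq_relaxed() come from
 * <linux/io-64-nonatomic-hi-lo.h> and are built from two 32-bit accesses,
 * high word first; the read side looks roughly like:
 *
 *	high = readl_relaxed(addr + 4);
 *	low = readl_relaxed(addr);
 *	return ((u64)high << 32) | low;
 */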

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	/*
	 * Armada-AP806 erratum #582743.
	 * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
	 * formats altogether and allow 32-bit accesses on the
	 * interconnect.
	 */
	smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
			    ARM_SMMU_FEAT_FMT_AARCH64_16K |
			    ARM_SMMU_FEAT_FMT_AARCH64_64K);

	return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
	.read_reg64 = mrvl_mmu500_readq,
	.write_reg64 = mrvl_mmu500_writeq,
	.cfg_probe = mrvl_mmu500_cfg_probe,
	.reset = arm_mmu500_reset,
};


struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Set the impl for model-specific implementation quirks first,
	 * such that platform integration quirks can pick it up and
	 * inherit from it if necessary.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	/* This is implicitly MMU-400 */
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
		return nvidia_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
	    of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
		return qcom_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
		smmu->impl = &mrvl_mmu500_impl;

	return smmu;
}
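
/*
 * Example devicetree node (hypothetical unit address) that would select
 * the Calxeda secure-alias workaround above:
 *
 *	smmu@fff00000 {
 *		compatible = "arm,mmu-400";
 *		calxeda,smmu-secure-config-access;
 *		...
 *	};
 */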