author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 20:32:50 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 20:32:50 +0200
commit		5e2d059b52e397d9ac42f4c4d9d9a841887b5818 (patch)
tree		c8cd8fd7187113be33e29fcc75f45a8bbc27e6b2 /drivers
parent		Merge tag 'modules-for-v4.19' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
parent		powerpc/mm/book3s/radix: Add mapping statistics (diff)
download	linux-5e2d059b52e397d9ac42f4c4d9d9a841887b5818.tar.xz
		linux-5e2d059b52e397d9ac42f4c4d9d9a841887b5818.zip
Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"Notable changes:
- A fix for a bug in our page table fragment allocator, where a page
table page could be freed and reallocated for something else while
still in use, leading to memory corruption etc. The fix reuses
pt_mm in struct page (x86 only) for a powerpc only refcount.
- Fixes to our pkey support. Several are user-visible changes, but
bring us in to line with x86 behaviour and/or fix outright bugs.
Thanks to Florian Weimer for reporting many of these.
- A series to improve the hvc driver & related OPAL console code,
which have been seen to cause hardlockups at times. The hvc driver
      changes in particular have been in linux-next for ~1 month.
- Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y.
- Remove Power8 DD1 and Power9 DD1 support, neither chip should be in
use anywhere other than as a paper weight.
- An optimised memcmp implementation using Power7-or-later VMX
      instructions.
- Support for barrier_nospec on some NXP CPUs.
- Support for flushing the count cache on context switch on some IBM
CPUs (controlled by firmware), as a Spectre v2 mitigation.
- A series to enhance the information we print on unhandled signals
to bring it into line with other arches, including showing the
offending VMA and dumping the instructions around the fault.
Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey
Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan,
Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski,
Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng,
Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph
Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave
Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer,
Frederic Barrat, Gautham R. Shenoy, Geert Uytterhoeven, Geoff Levand,
Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel
Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh
Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues,
Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha,
Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul
Mackerras, Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza
Arbab, Rodrigo R. Galvao, Russell Currey, Sam Bobroff, Scott Wood,
Shilpasri G Bhat, Simon Guo, Souptick Joarder, Stan Johnson, Thiago
Jung Bauermann, Tyrel Datwyler, Vaibhav Jain, Vasant Hegde, Venkat
Rao, zhong jiang"
* tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits)
powerpc/mm/book3s/radix: Add mapping statistics
powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
powerpc/mm/hash: Remove unnecessary do { } while(0) loop
powerpc/64s: move machine check SLB flushing to mm/slb.c
powerpc/powernv/idle: Fix build error
powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range
powerpc/mm: remove warning about ‘type’ being set
powerpc/32: Include setup.h header file to fix warnings
powerpc: Move `path` variable inside DEBUG_PROM
powerpc/powermac: Make some functions static
powerpc/powermac: Remove variable x that's never read
cxl: remove a dead branch
powerpc/powermac: Add missing include of header pmac.h
powerpc/kexec: Use common error handling code in setup_new_fdt()
powerpc/xmon: Add address lookup for percpu symbols
powerpc/mm: remove huge_pte_offset_and_shift() prototype
powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled
powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segements
powerpc/fadump: handle crash memory ranges array index overflow
...
Diffstat (limited to 'drivers')
33 files changed, 758 insertions, 2041 deletions
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54edaec1e608..bf6519cf64bc 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -758,8 +758,13 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 	cur_msec = jiffies_to_msecs(get_jiffies_64());
 
-	spin_lock(&gpstates->gpstate_lock);
 	freq_data.pstate_id = idx_to_pstate(new_index);
+	if (!gpstates) {
+		freq_data.gpstate_id = freq_data.pstate_id;
+		goto no_gpstate;
+	}
+
+	spin_lock(&gpstates->gpstate_lock);
 
 	if (!gpstates->last_sampled_time) {
 		gpstate_idx = new_index;
@@ -809,6 +814,7 @@ gpstates_done:
 	spin_unlock(&gpstates->gpstate_lock);
 
+no_gpstate:
 	/*
 	 * Use smp_call_function to send IPI and execute the
 	 * mtspr on target CPU. We could do that without IPI
@@ -843,6 +849,13 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		kernfs_put(kn);
 	}
 
+	policy->freq_table = powernv_freqs;
+	policy->fast_switch_possible = true;
+
+	if (pvr_version_is(PVR_POWER9))
+		return 0;
+
+	/* Initialise Gpstate ramp-down timer only on POWER8 */
 	gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
 	if (!gpstates)
 		return -ENOMEM;
@@ -857,8 +870,6 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
 	spin_lock_init(&gpstates->gpstate_lock);
 
-	policy->freq_table = powernv_freqs;
-	policy->fast_switch_possible = true;
 	return 0;
 }
@@ -998,7 +1009,8 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 	freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
 	freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
 	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
-	del_timer_sync(&gpstates->timer);
+	if (gpstates)
+		del_timer_sync(&gpstates->timer);
 }
 
 static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index d29e4f041efe..84b1ebe212b3 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -242,6 +242,7 @@ static inline void add_powernv_state(int index, const char *name,
 	powernv_states[index].target_residency = target_residency;
 	powernv_states[index].exit_latency = exit_latency;
 	powernv_states[index].enter = idle_fn;
+	/* For power8 and below psscr_* will be 0 */
 	stop_psscr_table[index].val = psscr_val;
 	stop_psscr_table[index].mask = psscr_mask;
 }
@@ -263,186 +264,80 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
 extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
-	struct device_node *power_mgt;
 	int nr_idle_states = 1; /* Snooze */
-	int dt_idle_states, count;
-	u32 latency_ns[CPUIDLE_STATE_MAX];
-	u32 residency_ns[CPUIDLE_STATE_MAX];
-	u32 flags[CPUIDLE_STATE_MAX];
-	u64 psscr_val[CPUIDLE_STATE_MAX];
-	u64 psscr_mask[CPUIDLE_STATE_MAX];
-	const char *names[CPUIDLE_STATE_MAX];
+	int dt_idle_states;
 	u32 has_stop_states = 0;
-	int i, rc;
+	int i;
 	u32 supported_flags = pnv_get_supported_cpuidle_states();
 
 	/* Currently we have snooze statically defined */
-
-	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
-	if (!power_mgt) {
-		pr_warn("opal: PowerMgmt Node not found\n");
-		goto out;
-	}
-
-	/* Read values of any property to determine the num of idle states */
-	dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
-	if (dt_idle_states < 0) {
-		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+	if (nr_pnv_idle_states <= 0) {
+		pr_warn("cpuidle-powernv : Only Snooze is available\n");
 		goto out;
 	}
 
-	count = of_property_count_u32_elems(power_mgt,
-					    "ibm,cpu-idle-state-latencies-ns");
-
-	if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
-				   "ibm,cpu-idle-state-latencies-ns",
-				   count) != 0)
-		goto out;
-
-	count = of_property_count_strings(power_mgt,
-					  "ibm,cpu-idle-state-names");
-	if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
-				   "ibm,cpu-idle-state-names",
-				   count) != 0)
-		goto out;
+	/* TODO: Count only states which are eligible for cpuidle */
+	dt_idle_states = nr_pnv_idle_states;
 
 	/*
 	 * Since snooze is used as first idle state, max idle states allowed is
 	 * CPUIDLE_STATE_MAX -1
 	 */
-	if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
+	if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) {
 		pr_warn("cpuidle-powernv: discovered idle states more than allowed");
 		dt_idle_states = CPUIDLE_STATE_MAX - 1;
 	}
 
-	if (of_property_read_u32_array(power_mgt,
-			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
-		pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
-		goto out;
-	}
-
-	if (of_property_read_u32_array(power_mgt,
-		"ibm,cpu-idle-state-latencies-ns", latency_ns,
-		dt_idle_states)) {
-		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
-		goto out;
-	}
-	if (of_property_read_string_array(power_mgt,
-		"ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
-		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
-		goto out;
-	}
-
 	/*
 	 * If the idle states use stop instruction, probe for psscr values
 	 * and psscr mask which are necessary to specify required stop level.
 	 */
-	has_stop_states = (flags[0] &
+	has_stop_states = (pnv_idle_states[0].flags &
 			   (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));
-	if (has_stop_states) {
-		count = of_property_count_u64_elems(power_mgt,
-						    "ibm,cpu-idle-state-psscr");
-		if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
-					   dt_idle_states,
-					   "ibm,cpu-idle-state-psscr",
-					   count) != 0)
-			goto out;
-
-		count = of_property_count_u64_elems(power_mgt,
-						    "ibm,cpu-idle-state-psscr-mask");
-		if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
-					   dt_idle_states,
-					   "ibm,cpu-idle-state-psscr-mask",
-					   count) != 0)
-			goto out;
-
-		if (of_property_read_u64_array(power_mgt,
-		    "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
-			pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
-			goto out;
-		}
-
-		if (of_property_read_u64_array(power_mgt,
-					       "ibm,cpu-idle-state-psscr-mask",
-					       psscr_mask, dt_idle_states)) {
-			pr_warn("cpuidle-powernv:Missing ibm,cpu-idle-state-psscr-mask in DT\n");
-			goto out;
-		}
-	}
-
-	count = of_property_count_u32_elems(power_mgt,
-					    "ibm,cpu-idle-state-residency-ns");
-
-	if (count < 0) {
-		rc = count;
-	} else if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
-					  dt_idle_states,
-					  "ibm,cpu-idle-state-residency-ns",
-					  count) != 0) {
-		goto out;
-	} else {
-		rc = of_property_read_u32_array(power_mgt,
-						"ibm,cpu-idle-state-residency-ns",
-						residency_ns, dt_idle_states);
-	}
 
 	for (i = 0; i < dt_idle_states; i++) {
 		unsigned int exit_latency, target_residency;
 		bool stops_timebase = false;
+		struct pnv_idle_states_t *state = &pnv_idle_states[i];
 
 		/*
 		 * Skip the platform idle state whose flag isn't in
 		 * the supported_cpuidle_states flag mask.
 		 */
-		if ((flags[i] & supported_flags) != flags[i])
+		if ((state->flags & supported_flags) != state->flags)
 			continue;
 		/*
 		 * If an idle state has exit latency beyond
 		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
 		 * in cpu-idle.
 		 */
-		if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
+		if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS)
 			continue;
 		/*
 		 * Firmware passes residency and latency values in ns.
 		 * cpuidle expects it in us.
 		 */
-		exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
-		if (!rc)
-			target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
-		else
-			target_residency = 0;
-
-		if (has_stop_states) {
-			int err = validate_psscr_val_mask(&psscr_val[i],
-							  &psscr_mask[i],
-							  flags[i]);
-			if (err) {
-				report_invalid_psscr_val(psscr_val[i], err);
+		exit_latency = DIV_ROUND_UP(state->latency_ns, 1000);
+		target_residency = DIV_ROUND_UP(state->residency_ns, 1000);
+
+		if (has_stop_states && !(state->valid))
 			continue;
-			}
-		}
 
-		if (flags[i] & OPAL_PM_TIMEBASE_STOP)
+		if (state->flags & OPAL_PM_TIMEBASE_STOP)
 			stops_timebase = true;
 
-		/*
-		 * For nap and fastsleep, use default target_residency
-		 * values if f/w does not expose it.
-		 */
-		if (flags[i] & OPAL_PM_NAP_ENABLED) {
-			if (!rc)
-				target_residency = 100;
+		if (state->flags & OPAL_PM_NAP_ENABLED) {
 			/* Add NAP state */
 			add_powernv_state(nr_idle_states, "Nap",
 					  CPUIDLE_FLAG_NONE, nap_loop,
 					  target_residency, exit_latency, 0, 0);
 		} else if (has_stop_states && !stops_timebase) {
-			add_powernv_state(nr_idle_states, names[i],
+			add_powernv_state(nr_idle_states, state->name,
 					  CPUIDLE_FLAG_NONE, stop_loop,
 					  target_residency, exit_latency,
-					  psscr_val[i], psscr_mask[i]);
+					  state->psscr_val,
+					  state->psscr_mask);
 		}
 
 		/*
@@ -450,20 +345,19 @@ static int powernv_add_idle_states(void)
 		 * within this config dependency check.
 		 */
 #ifdef CONFIG_TICK_ONESHOT
-		else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
-			 flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
-			if (!rc)
-				target_residency = 300000;
+		else if (state->flags & OPAL_PM_SLEEP_ENABLED ||
+			 state->flags & OPAL_PM_SLEEP_ENABLED_ER1) {
 			/* Add FASTSLEEP state */
 			add_powernv_state(nr_idle_states, "FastSleep",
 					  CPUIDLE_FLAG_TIMER_STOP,
 					  fastsleep_loop,
 					  target_residency, exit_latency, 0, 0);
 		} else if (has_stop_states && stops_timebase) {
-			add_powernv_state(nr_idle_states, names[i],
+			add_powernv_state(nr_idle_states, state->name,
 					  CPUIDLE_FLAG_TIMER_STOP, stop_loop,
 					  target_residency, exit_latency,
-					  psscr_val[i], psscr_mask[i]);
+					  state->psscr_val,
+					  state->psscr_mask);
 		}
 #endif
 		else
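The two powernv-cpufreq hunks above make the global-pstate ramp-down machinery a POWER8-only object: powernv_cpufreq_cpu_init() returns before allocating gpstates on POWER9, so every later consumer must tolerate a NULL pointer. A minimal, compileable userspace sketch of that shape (gpstates and the POWER9 check mirror the kernel names; the struct, helpers, and flag here are invented stand-ins, not the driver's real code):

#include <stdlib.h>
#include <stdbool.h>

/* Invented stand-in for the kernel's global_pstate_info. */
struct gpstates_info {
	int timer_armed;
};

static struct gpstates_info *gpstates;	/* stays NULL on POWER9 */
static bool is_power9 = true;		/* models pvr_version_is(PVR_POWER9) */

static int cpu_init(void)
{
	if (is_power9)
		return 0;	/* ramp-down timer is a POWER8-only workaround */

	gpstates = calloc(1, sizeof(*gpstates));
	return gpstates ? 0 : -1;
}

static void stop_cpu(void)
{
	if (gpstates)	/* every consumer guards, like the del_timer_sync() hunk */
		gpstates->timer_armed = 0;
}

int main(void)
{
	if (cpu_init() == 0)
		stop_cpu();	/* safe even though gpstates == NULL here */
	free(gpstates);
	return 0;
}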
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 36afd6d8753c..c68df7e8bee1 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -24,6 +24,8 @@
 #include <asm/icswx.h>
 #include <asm/vas.h>
 #include <asm/reg.h>
+#include <asm/opal-api.h>
+#include <asm/opal.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
@@ -753,7 +755,7 @@ static int nx842_open_percpu_txwins(void)
 }
 
 static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
-					int vasid)
+					int vasid, int *ct)
 {
 	struct vas_window *rxwin = NULL;
 	struct vas_rx_win_attr rxattr;
@@ -837,6 +839,15 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
 	coproc->vas.id = vasid;
 	nx842_add_coprocs_list(coproc, chip_id);
 
+	/*
+	 * (lpid, pid, tid) combination has to be unique for each
+	 * coprocessor instance in the system. So to make it
+	 * unique, skiboot uses coprocessor type such as 842 or
+	 * GZIP for pid and provides this value to kernel in pid
+	 * device-tree property.
+	 */
+	*ct = pid;
+
 	return 0;
 
 err_out:
@@ -850,6 +861,7 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
 	struct device_node *dn;
 	int chip_id, vasid, ret = 0;
 	int nx_fifo_found = 0;
+	int uninitialized_var(ct);
 
 	chip_id = of_get_ibm_chip_id(pn);
 	if (chip_id < 0) {
@@ -865,7 +877,7 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
 
 	for_each_child_of_node(pn, dn) {
 		if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
-			ret = vas_cfg_coproc_info(dn, chip_id, vasid);
+			ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
 			if (ret) {
 				of_node_put(dn);
 				return ret;
@@ -876,9 +888,22 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
 
 	if (!nx_fifo_found) {
 		pr_err("NX842 FIFO nodes are missing\n");
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
+	/*
+	 * Initialize NX instance for both high and normal priority FIFOs.
+	 */
+	if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+		ret = opal_nx_coproc_init(chip_id, ct);
+		if (ret) {
+			pr_err("Failed to initialize NX for chip(%d): %d\n",
+				chip_id, ret);
+			ret = opal_error_code(ret);
+		}
+	} else
+		pr_warn("Firmware doesn't support NX initialization\n");
+
 	return ret;
 }
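The probe path above only issues the new OPAL call when firmware advertises the corresponding token. A compileable userspace model of that guard (opal_check_token(), opal_nx_coproc_init() and OPAL_NX_COPROC_INIT are the names used in the diff; the stub bodies and the token's numeric value here are invented):

#include <stdio.h>
#include <stdbool.h>

#define OPAL_NX_COPROC_INIT 9999	/* invented value; the real one is in asm/opal-api.h */

static bool opal_check_token(int token) { (void)token; return true; }	/* stub */
static int opal_nx_coproc_init(int chip_id, int ct)			/* stub */
{
	(void)chip_id; (void)ct;
	return 0;
}

static int nx842_init_coproc(int chip_id, int ct)
{
	if (!opal_check_token(OPAL_NX_COPROC_INIT)) {
		fprintf(stderr, "Firmware doesn't support NX initialization\n");
		return 0;	/* older firmware: warn and carry on */
	}
	return opal_nx_coproc_init(chip_id, ct);
}

int main(void)
{
	/* ct is the coprocessor type that skiboot encodes in the "pid"
	 * device-tree property (842 or GZIP), per the comment in the hunk. */
	return nx842_init_coproc(0, 842);
}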
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index f829dadfd5a0..83472808c816 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -90,11 +90,20 @@ struct sensor_data {
 	char label[MAX_LABEL_LEN];
 	char name[MAX_ATTR_LEN];
 	struct device_attribute dev_attr;
+	struct sensor_group_data *sgrp_data;
+};
+
+struct sensor_group_data {
+	struct mutex mutex;
+	u32 gid;
+	bool enable;
 };
 
 struct platform_data {
 	const struct attribute_group *attr_groups[MAX_SENSOR_TYPE + 1];
+	struct sensor_group_data *sgrp_data;
 	u32 sensors_count; /* Total count of sensors from each group */
+	u32 nr_sensor_groups; /* Total number of sensor groups */
 };
 
 static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
@@ -105,6 +114,9 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
 	ssize_t ret;
 	u64 x;
 
+	if (sdata->sgrp_data && !sdata->sgrp_data->enable)
+		return -ENODATA;
+
 	ret = opal_get_sensor_data_u64(sdata->id, &x);
 
 	if (ret)
@@ -120,6 +132,46 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *devattr,
 	return sprintf(buf, "%llu\n", x);
 }
 
+static ssize_t show_enable(struct device *dev,
+			   struct device_attribute *devattr, char *buf)
+{
+	struct sensor_data *sdata = container_of(devattr, struct sensor_data,
+						 dev_attr);
+
+	return sprintf(buf, "%u\n", sdata->sgrp_data->enable);
+}
+
+static ssize_t store_enable(struct device *dev,
+			    struct device_attribute *devattr,
+			    const char *buf, size_t count)
+{
+	struct sensor_data *sdata = container_of(devattr, struct sensor_data,
+						 dev_attr);
+	struct sensor_group_data *sgrp_data = sdata->sgrp_data;
+	int ret;
+	bool data;
+
+	ret = kstrtobool(buf, &data);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&sgrp_data->mutex);
+	if (ret)
+		return ret;
+
+	if (data != sgrp_data->enable) {
+		ret = sensor_group_enable(sgrp_data->gid, data);
+		if (!ret)
+			sgrp_data->enable = data;
+	}
+
+	if (!ret)
+		ret = count;
+
+	mutex_unlock(&sgrp_data->mutex);
+	return ret;
+}
+
 static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
 			  char *buf)
 {
@@ -292,12 +344,115 @@ static u32 get_sensor_hwmon_index(struct sensor_data *sdata,
 	return ++sensor_groups[sdata->type].hwmon_index;
 }
 
+static int init_sensor_group_data(struct platform_device *pdev,
+				  struct platform_data *pdata)
+{
+	struct sensor_group_data *sgrp_data;
+	struct device_node *groups, *sgrp;
+	int count = 0, ret = 0;
+	enum sensors type;
+
+	groups = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+	if (!groups)
+		return ret;
+
+	for_each_child_of_node(groups, sgrp) {
+		type = get_sensor_type(sgrp);
+		if (type != MAX_SENSOR_TYPE)
+			pdata->nr_sensor_groups++;
+	}
+
+	if (!pdata->nr_sensor_groups)
+		goto out;
+
+	sgrp_data = devm_kcalloc(&pdev->dev, pdata->nr_sensor_groups,
+				 sizeof(*sgrp_data), GFP_KERNEL);
+	if (!sgrp_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for_each_child_of_node(groups, sgrp) {
+		u32 gid;
+
+		type = get_sensor_type(sgrp);
+		if (type == MAX_SENSOR_TYPE)
+			continue;
+
+		if (of_property_read_u32(sgrp, "sensor-group-id", &gid))
+			continue;
+
+		if (of_count_phandle_with_args(sgrp, "sensors", NULL) <= 0)
+			continue;
+
+		sensor_groups[type].attr_count++;
+		sgrp_data[count].gid = gid;
+		mutex_init(&sgrp_data[count].mutex);
+		sgrp_data[count++].enable = false;
+	}
+
+	pdata->sgrp_data = sgrp_data;
+out:
+	of_node_put(groups);
+	return ret;
+}
+
+static struct sensor_group_data *get_sensor_group(struct platform_data *pdata,
+						  struct device_node *node,
+						  enum sensors gtype)
+{
+	struct sensor_group_data *sgrp_data = pdata->sgrp_data;
+	struct device_node *groups, *sgrp;
+
+	groups = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+	if (!groups)
+		return NULL;
+
+	for_each_child_of_node(groups, sgrp) {
+		struct of_phandle_iterator it;
+		u32 gid;
+		int rc, i;
+		enum sensors type;
+
+		type = get_sensor_type(sgrp);
+		if (type != gtype)
+			continue;
+
+		if (of_property_read_u32(sgrp, "sensor-group-id", &gid))
+			continue;
+
+		of_for_each_phandle(&it, rc, sgrp, "sensors", NULL, 0)
+			if (it.phandle == node->phandle) {
+				of_node_put(it.node);
+				break;
+			}
+
+		if (rc)
+			continue;
+
+		for (i = 0; i < pdata->nr_sensor_groups; i++)
+			if (gid == sgrp_data[i].gid) {
+				of_node_put(sgrp);
+				of_node_put(groups);
+				return &sgrp_data[i];
+			}
+	}
+
+	of_node_put(groups);
+	return NULL;
+}
+
 static int populate_attr_groups(struct platform_device *pdev)
 {
 	struct platform_data *pdata = platform_get_drvdata(pdev);
 	const struct attribute_group **pgroups = pdata->attr_groups;
 	struct device_node *opal, *np;
 	enum sensors type;
+	int ret;
+
+	ret = init_sensor_group_data(pdev, pdata);
+	if (ret)
+		return ret;
 
 	opal = of_find_node_by_path("/ibm,opal/sensors");
 	for_each_child_of_node(opal, np) {
@@ -344,7 +499,10 @@ static int populate_attr_groups(struct platform_device *pdev)
 static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
 			      ssize_t (*show)(struct device *dev,
 					      struct device_attribute *attr,
-					      char *buf))
+					      char *buf),
+			      ssize_t (*store)(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t count))
 {
 	snprintf(sdata->name, MAX_ATTR_LEN, "%s%d_%s",
 		 sensor_groups[sdata->type].name, sdata->hwmon_index,
@@ -352,23 +510,33 @@ static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
 	sysfs_attr_init(&sdata->dev_attr.attr);
 	sdata->dev_attr.attr.name = sdata->name;
-	sdata->dev_attr.attr.mode = S_IRUGO;
 	sdata->dev_attr.show = show;
+	if (store) {
+		sdata->dev_attr.store = store;
+		sdata->dev_attr.attr.mode = 0664;
+	} else {
+		sdata->dev_attr.attr.mode = 0444;
+	}
 }
 
 static void populate_sensor(struct sensor_data *sdata, int od, int hd, int sid,
 			    const char *attr_name, enum sensors type,
 			    const struct attribute_group *pgroup,
+			    struct sensor_group_data *sgrp_data,
 			    ssize_t (*show)(struct device *dev,
 					    struct device_attribute *attr,
-					    char *buf))
+					    char *buf),
+			    ssize_t (*store)(struct device *dev,
					     struct device_attribute *attr,
+					     const char *buf, size_t count))
 {
 	sdata->id = sid;
 	sdata->type = type;
 	sdata->opal_index = od;
 	sdata->hwmon_index = hd;
-	create_hwmon_attr(sdata, attr_name, show);
+	create_hwmon_attr(sdata, attr_name, show, store);
 	pgroup->attrs[sensor_groups[type].attr_count++] = &sdata->dev_attr.attr;
+	sdata->sgrp_data = sgrp_data;
 }
 
 static char *get_max_attr(enum sensors type)
@@ -403,24 +571,23 @@ static int create_device_attrs(struct platform_device *pdev)
 	const struct attribute_group **pgroups = pdata->attr_groups;
 	struct device_node *opal, *np;
 	struct sensor_data *sdata;
-	u32 sensor_id;
-	enum sensors type;
 	u32 count = 0;
-	int err = 0;
+	u32 group_attr_id[MAX_SENSOR_TYPE] = {0};
 
-	opal = of_find_node_by_path("/ibm,opal/sensors");
 	sdata = devm_kcalloc(&pdev->dev,
 			     pdata->sensors_count, sizeof(*sdata),
 			     GFP_KERNEL);
-	if (!sdata) {
-		err = -ENOMEM;
-		goto exit_put_node;
-	}
+	if (!sdata)
+		return -ENOMEM;
 
+	opal = of_find_node_by_path("/ibm,opal/sensors");
 	for_each_child_of_node(opal, np) {
+		struct sensor_group_data *sgrp_data;
 		const char *attr_name;
-		u32 opal_index;
+		u32 opal_index, hw_id;
+		u32 sensor_id;
 		const char *label;
+		enum sensors type;
 
 		if (np->name == NULL)
 			continue;
@@ -456,14 +623,12 @@ static int create_device_attrs(struct platform_device *pdev)
 			opal_index = INVALID_INDEX;
 		}
 
-		sdata[count].opal_index = opal_index;
-		sdata[count].hwmon_index =
-			get_sensor_hwmon_index(&sdata[count], sdata, count);
-
-		create_hwmon_attr(&sdata[count], attr_name, show_sensor);
-
-		pgroups[type]->attrs[sensor_groups[type].attr_count++] =
-				&sdata[count++].dev_attr.attr;
+		hw_id = get_sensor_hwmon_index(&sdata[count], sdata, count);
+		sgrp_data = get_sensor_group(pdata, np, type);
+		populate_sensor(&sdata[count], opal_index, hw_id, sensor_id,
+				attr_name, type, pgroups[type], sgrp_data,
+				show_sensor, NULL);
+		count++;
 
 		if (!of_property_read_string(np, "label", &label)) {
 			/*
@@ -474,35 +639,43 @@ static int create_device_attrs(struct platform_device *pdev)
 			 */
 			make_sensor_label(np, &sdata[count], label);
-			populate_sensor(&sdata[count], opal_index,
-					sdata[count - 1].hwmon_index,
+			populate_sensor(&sdata[count], opal_index, hw_id,
 					sensor_id, "label", type, pgroups[type],
-					show_label);
+					NULL, show_label, NULL);
 			count++;
 		}
 
 		if (!of_property_read_u32(np, "sensor-data-max", &sensor_id)) {
 			attr_name = get_max_attr(type);
-			populate_sensor(&sdata[count], opal_index,
-					sdata[count - 1].hwmon_index,
+			populate_sensor(&sdata[count], opal_index, hw_id,
 					sensor_id, attr_name, type,
-					pgroups[type], show_sensor);
+					pgroups[type], sgrp_data, show_sensor,
+					NULL);
 			count++;
 		}
 
 		if (!of_property_read_u32(np, "sensor-data-min", &sensor_id)) {
 			attr_name = get_min_attr(type);
-			populate_sensor(&sdata[count], opal_index,
-					sdata[count - 1].hwmon_index,
+			populate_sensor(&sdata[count], opal_index, hw_id,
 					sensor_id, attr_name, type,
-					pgroups[type], show_sensor);
+					pgroups[type], sgrp_data, show_sensor,
+					NULL);
+			count++;
+		}
+
+		if (sgrp_data && !sgrp_data->enable) {
+			sgrp_data->enable = true;
+			hw_id = ++group_attr_id[type];
+			populate_sensor(&sdata[count], opal_index, hw_id,
+					sgrp_data->gid, "enable", type,
+					pgroups[type], sgrp_data, show_enable,
+					store_enable);
 			count++;
 		}
 	}
 
-exit_put_node:
 	of_node_put(opal);
-	return err;
+	return 0;
 }
 
 static int ibmpowernv_probe(struct platform_device *pdev)
@@ -517,6 +690,7 @@ static int ibmpowernv_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, pdata);
 
 	pdata->sensors_count = 0;
+	pdata->nr_sensor_groups = 0;
 	err = populate_attr_groups(pdev);
 	if (err)
 		return err;
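The heart of the hwmon change is the new read-write "enable" attribute: store_enable() parses a boolean, serialises on the group mutex, and only calls firmware when the cached state actually changes. A compileable userspace rendition of that control flow (sensor_group_enable() is stubbed — in the driver it is an OPAL call — and kstrtobool()/mutex_lock_interruptible() are replaced with portable equivalents; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct sensor_group_data {
	pthread_mutex_t mutex;
	unsigned int gid;
	bool enable;
};

static int sensor_group_enable(unsigned int gid, bool enable)
{
	(void)gid; (void)enable;
	return 0;	/* stub for the firmware call */
}

static int store_enable(struct sensor_group_data *sgrp, const char *buf)
{
	bool data = !strcmp(buf, "1");	/* stand-in for kstrtobool() */
	int ret = 0;

	pthread_mutex_lock(&sgrp->mutex);
	if (data != sgrp->enable) {	/* skip the firmware call on no-ops */
		ret = sensor_group_enable(sgrp->gid, data);
		if (!ret)
			sgrp->enable = data;	/* cache only on success */
	}
	pthread_mutex_unlock(&sgrp->mutex);
	return ret;
}

int main(void)
{
	struct sensor_group_data g = { PTHREAD_MUTEX_INITIALIZER, 1, false };

	return store_enable(&g, "1");
}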
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 97a420c11eed..47c350cdfb12 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -39,17 +39,6 @@ config ADB_IOP
 	  <http://www.angelfire.com/ca2/dev68k/iopdesc.html> to enable direct
 	  support for it, say 'Y' here.
 
-config ADB_PMU68K
-	bool "Include PMU (Powerbook) ADB driver"
-	depends on ADB && MAC
-	help
-	  Say Y here if want your kernel to support the m68k based Powerbooks.
-	  This includes the PowerBook 140, PowerBook 145, PowerBook 150,
-	  PowerBook 160, PowerBook 165, PowerBook 165c, PowerBook 170,
-	  PowerBook 180, PowerBook, 180c, PowerBook 190cs, PowerBook 520,
-	  PowerBook Duo 210, PowerBook Duo 230, PowerBook Duo 250,
-	  PowerBook Duo 270c, PowerBook Duo 280 and PowerBook Duo 280c.
-
 # we want to change this to something like CONFIG_SYSCTRL_CUDA/PMU
 config ADB_CUDA
 	bool "Support for Cuda/Egret based Macs and PowerMacs"
@@ -65,8 +54,8 @@ config ADB_CUDA
 	  If unsure say Y.
 
 config ADB_PMU
-	bool "Support for PMU based PowerMacs"
-	depends on PPC_PMAC
+	bool "Support for PMU based PowerMacs and PowerBooks"
+	depends on PPC_PMAC || MAC
 	help
 	  On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the
 	  PMU is an embedded microprocessor whose primary function is to
@@ -79,7 +68,7 @@ config ADB_PMU
 
 config ADB_PMU_LED
 	bool "Support for the Power/iBook front LED"
-	depends on ADB_PMU
+	depends on PPC_PMAC && ADB_PMU
 	select NEW_LEDS
 	select LEDS_CLASS
 	help
@@ -122,7 +111,7 @@ config PMAC_MEDIABAY
 
 config PMAC_BACKLIGHT
 	bool "Backlight control for LCD screens"
-	depends on ADB_PMU && FB = y && (BROKEN || !PPC64)
+	depends on PPC_PMAC && ADB_PMU && FB = y && (BROKEN || !PPC64)
 	select FB_BACKLIGHT
 	help
 	  Say Y here to enable Macintosh specific extensions of the generic
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index ee803638e595..49819b1b6f20 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_PMAC_SMU)		+= smu.o
 obj-$(CONFIG_ADB)		+= adb.o
 obj-$(CONFIG_ADB_MACII)		+= via-macii.o
 obj-$(CONFIG_ADB_IOP)		+= adb-iop.o
-obj-$(CONFIG_ADB_PMU68K)	+= via-pmu68k.o
 obj-$(CONFIG_ADB_MACIO)		+= macio-adb.o
 
 obj-$(CONFIG_THERM_WINDTUNNEL)	+= therm_windtunnel.o
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 4c8097e0e6fe..76e98f0f7a3e 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -65,7 +65,7 @@ static struct adb_driver *adb_driver_list[] = {
 #ifdef CONFIG_ADB_IOP
 	&adb_iop_driver,
 #endif
-#if defined(CONFIG_ADB_PMU) || defined(CONFIG_ADB_PMU68K)
+#ifdef CONFIG_ADB_PMU
 	&via_pmu_driver,
 #endif
 #ifdef CONFIG_ADB_MACIO
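With ADB_PMU now selectable on both PPC_PMAC and MAC, the merged via-pmu.c driver (next diff) keys its hardware differences off CONFIG_PPC_PMAC; the VIA port-B handshake bits are one example. A self-contained illustration of that define selection (CONFIG_PPC_PMAC here is just a plain preprocessor symbol, normally supplied by Kconfig):

#include <stdio.h>

/* #define CONFIG_PPC_PMAC */	/* defined by Kconfig on PowerMac builds */

#ifdef CONFIG_PPC_PMAC
#define TACK	0x08	/* Transfer acknowledge (input) */
#define TREQ	0x10	/* Transfer request (output) */
#else
#define TACK	0x02	/* 68k PowerBooks wire the PMU to other port-B bits */
#define TREQ	0x04
#endif

int main(void)
{
	printf("TACK=0x%02x TREQ=0x%02x\n", TACK, TREQ);
	return 0;
}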
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 25c1ce811053..d72c450aebe5 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Device driver for the via-pmu on Apple Powermacs.
+ * Device driver for the PMU in Apple PowerBooks and PowerMacs.
  *
  * The VIA (versatile interface adapter) interfaces to the PMU,
  * a 6805 microprocessor core whose primary function is to control
@@ -49,20 +49,26 @@
 #include <linux/compat.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <asm/prom.h>
+#include <linux/uaccess.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/irq.h>
+#ifdef CONFIG_PPC_PMAC
 #include <asm/pmac_feature.h>
 #include <asm/pmac_pfunc.h>
 #include <asm/pmac_low_i2c.h>
-#include <linux/uaccess.h>
+#include <asm/prom.h>
 #include <asm/mmu_context.h>
 #include <asm/cputable.h>
 #include <asm/time.h>
 #include <asm/backlight.h>
+#else
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_via.h>
+#endif
 
 #include "via-pmu-event.h"
@@ -76,7 +82,6 @@
 #define BATTERY_POLLING_COUNT	2
 
 static DEFINE_MUTEX(pmu_info_proc_mutex);
-static volatile unsigned char __iomem *via;
 
 /* VIA registers - spaced 0x200 bytes apart */
 #define RS		0x200		/* skip between registers */
@@ -98,8 +103,13 @@ static volatile unsigned char __iomem *via;
 #define ANH		(15*RS)		/* A-side data, no handshake */
 
 /* Bits in B data register: both active low */
+#ifdef CONFIG_PPC_PMAC
 #define TACK		0x08		/* Transfer acknowledge (input) */
 #define TREQ		0x10		/* Transfer request (output) */
+#else
+#define TACK		0x02
+#define TREQ		0x04
+#endif
 
 /* Bits in ACR */
 #define SR_CTRL		0x1c		/* Shift register control bits */
@@ -114,6 +124,7 @@ static volatile unsigned char __iomem *via;
 #define CB1_INT		0x10		/* transition on CB1 input */
 
 static volatile enum pmu_state {
+	uninitialized = 0,
 	idle,
 	sending,
 	intack,
@@ -140,11 +151,15 @@ static int data_index;
 static int data_len;
 static volatile int adb_int_pending;
 static volatile int disable_poll;
-static struct device_node *vias;
 static int pmu_kind = PMU_UNKNOWN;
 static int pmu_fully_inited;
 static int pmu_has_adb;
+#ifdef CONFIG_PPC_PMAC
+static volatile unsigned char __iomem *via1;
+static volatile unsigned char __iomem *via2;
+static struct device_node *vias;
 static struct device_node *gpio_node;
+#endif
 static unsigned char __iomem *gpio_reg;
 static int gpio_irq = 0;
 static int gpio_irq_enabled = -1;
@@ -157,7 +172,9 @@ static int drop_interrupts;
 static int option_lid_wakeup = 1;
 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
 static unsigned long async_req_locks;
-static unsigned int pmu_irq_stats[11];
+
+#define NUM_IRQ_STATS 13
+static unsigned int pmu_irq_stats[NUM_IRQ_STATS];
 
 static struct proc_dir_entry *proc_pmu_root;
 static struct proc_dir_entry *proc_pmu_info;
@@ -271,10 +288,11 @@ static char *pbook_type[] = {
 
 int __init find_via_pmu(void)
 {
+#ifdef CONFIG_PPC_PMAC
 	u64 taddr;
 	const u32 *reg;
 
-	if (via)
+	if (pmu_state != uninitialized)
 		return 1;
 	vias = of_find_node_by_name(NULL, "via-pmu");
 	if (vias == NULL)
@@ -339,50 +357,70 @@ int __init find_via_pmu(void)
 	} else
 		pmu_kind = PMU_UNKNOWN;
 
-	via = ioremap(taddr, 0x2000);
-	if (via == NULL) {
+	via1 = via2 = ioremap(taddr, 0x2000);
+	if (via1 == NULL) {
 		printk(KERN_ERR "via-pmu: Can't map address !\n");
 		goto fail_via_remap;
 	}
 
-	out_8(&via[IER], IER_CLR | 0x7f);	/* disable all intrs */
-	out_8(&via[IFR], 0x7f);			/* clear IFR */
+	out_8(&via1[IER], IER_CLR | 0x7f);	/* disable all intrs */
+	out_8(&via1[IFR], 0x7f);		/* clear IFR */
 
 	pmu_state = idle;
 
 	if (!init_pmu())
 		goto fail_init;
 
-	printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
-	       PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
-
 	sys_ctrler = SYS_CTRLER_PMU;
 
 	return 1;
 
 fail_init:
-	iounmap(via);
-	via = NULL;
+	iounmap(via1);
+	via1 = via2 = NULL;
 fail_via_remap:
 	iounmap(gpio_reg);
 	gpio_reg = NULL;
 fail:
 	of_node_put(vias);
 	vias = NULL;
+	pmu_state = uninitialized;
 	return 0;
+#else
+	if (macintosh_config->adb_type != MAC_ADB_PB2)
+		return 0;
+
+	pmu_kind = PMU_UNKNOWN;
+
+	spin_lock_init(&pmu_lock);
+
+	pmu_has_adb = 1;
+
+	pmu_intr_mask =	PMU_INT_PCEJECT |
+			PMU_INT_SNDBRT |
+			PMU_INT_ADB |
+			PMU_INT_TICK;
+
+	pmu_state = idle;
+
+	if (!init_pmu()) {
+		pmu_state = uninitialized;
+		return 0;
+	}
+
+	return 1;
+#endif /* !CONFIG_PPC_PMAC */
 }
 
 #ifdef CONFIG_ADB
 static int pmu_probe(void)
 {
-	return vias == NULL? -ENODEV: 0;
+	return pmu_state == uninitialized ? -ENODEV : 0;
 }
 
-static int __init pmu_init(void)
+static int pmu_init(void)
 {
-	if (vias == NULL)
-		return -ENODEV;
-	return 0;
+	return pmu_state == uninitialized ? -ENODEV : 0;
 }
 #endif /* CONFIG_ADB */
@@ -395,13 +433,14 @@ static int __init pmu_init(void)
  */
 static int __init via_pmu_start(void)
 {
-	unsigned int irq;
+	unsigned int __maybe_unused irq;
 
-	if (vias == NULL)
+	if (pmu_state == uninitialized)
 		return -ENODEV;
 
 	batt_req.complete = 1;
 
+#ifdef CONFIG_PPC_PMAC
 	irq = irq_of_parse_and_map(vias, 0);
 	if (!irq) {
 		printk(KERN_ERR "via-pmu: can't map interrupt\n");
@@ -437,7 +476,20 @@ static int __init via_pmu_start(void)
 	}
 
 	/* Enable interrupts */
-	out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
+	out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
+#else
+	if (request_irq(IRQ_MAC_ADB_SR, via_pmu_interrupt, IRQF_NO_SUSPEND,
+			"VIA-PMU-SR", NULL)) {
+		pr_err("%s: couldn't get SR irq\n", __func__);
+		return -ENODEV;
+	}
+	if (request_irq(IRQ_MAC_ADB_CL, via_pmu_interrupt, IRQF_NO_SUSPEND,
+			"VIA-PMU-CL", NULL)) {
+		pr_err("%s: couldn't get CL irq\n", __func__);
+		free_irq(IRQ_MAC_ADB_SR, NULL);
+		return -ENODEV;
+	}
+#endif /* !CONFIG_PPC_PMAC */
 
 	pmu_fully_inited = 1;
@@ -463,7 +515,7 @@ arch_initcall(via_pmu_start);
  */
 static int __init via_pmu_dev_init(void)
 {
-	if (vias == NULL)
+	if (pmu_state == uninitialized)
 		return -ENODEV;
 
 #ifdef CONFIG_PMAC_BACKLIGHT
@@ -534,8 +586,9 @@ init_pmu(void)
 	int timeout;
 	struct adb_request req;
 
-	out_8(&via[B], via[B] | TREQ);			/* negate TREQ */
-	out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK);	/* TACK in, TREQ out */
+	/* Negate TREQ. Set TACK to input and TREQ to output. */
+	out_8(&via2[B], in_8(&via2[B]) | TREQ);
+	out_8(&via2[DIRB], (in_8(&via2[DIRB]) | TREQ) & ~TACK);
 
 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 	timeout = 100000;
@@ -587,6 +640,10 @@ init_pmu(void)
 			option_server_mode ? "enabled" : "disabled");
 		}
 	}
+
+	printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
+	       PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
+
 	return 1;
 }
@@ -625,6 +682,7 @@ static void pmu_set_server_mode(int server_mode)
 static void
 done_battery_state_ohare(struct adb_request* req)
 {
+#ifdef CONFIG_PPC_PMAC
 	/* format:
 	 *  [0] : flags
 	 *    0x01 : AC indicator
@@ -706,6 +764,7 @@ done_battery_state_ohare(struct adb_request* req)
 	pmu_batteries[pmu_cur_battery].amperage   = amperage;
 	pmu_batteries[pmu_cur_battery].voltage    = voltage;
 	pmu_batteries[pmu_cur_battery].time_remaining = time;
+#endif /* CONFIG_PPC_PMAC */
 
 	clear_bit(0, &async_req_locks);
 }
@@ -816,9 +875,9 @@ static int pmu_info_proc_show(struct seq_file *m, void *v)
 static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
 {
 	int i;
-	static const char *irq_names[] = {
-		"Total CB1 triggered events",
-		"Total GPIO1 triggered events",
+	static const char *irq_names[NUM_IRQ_STATS] = {
+		"Unknown interrupt (type 0)",
+		"Unknown interrupt (type 1)",
 		"PC-Card eject button",
 		"Sound/Brightness button",
 		"ADB message",
@@ -827,10 +886,12 @@ static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
 		"Tick timer",
 		"Ghost interrupt (zero len)",
 		"Empty interrupt (empty mask)",
-		"Max irqs in a row"
+		"Max irqs in a row",
+		"Total CB1 triggered events",
+		"Total GPIO1 triggered events",
 	};
 
-	for (i=0; i<11; i++) {
+	for (i = 0; i < NUM_IRQ_STATS; i++) {
 		seq_printf(m, " %2u: %10u (%s)\n", i, pmu_irq_stats[i],
 			   irq_names[i]);
 	}
@@ -928,7 +989,7 @@ static int pmu_send_request(struct adb_request *req, int sync)
 {
 	int i, ret;
 
-	if ((vias == NULL) || (!pmu_fully_inited)) {
+	if (pmu_state == uninitialized || !pmu_fully_inited) {
 		req->complete = 1;
 		return -ENXIO;
 	}
@@ -1022,7 +1083,7 @@ static int __pmu_adb_autopoll(int devs)
 
 static int pmu_adb_autopoll(int devs)
 {
-	if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
+	if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb)
 		return -ENXIO;
 
 	adb_dev_map = devs;
@@ -1035,7 +1096,7 @@ static int pmu_adb_reset_bus(void)
 	struct adb_request req;
 	int save_autopoll = adb_dev_map;
 
-	if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
+	if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb)
 		return -ENXIO;
 
 	/* anyone got a better idea?? */
@@ -1071,7 +1132,7 @@ pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
 	va_list list;
 	int i;
 
-	if (vias == NULL)
+	if (pmu_state == uninitialized)
 		return -ENXIO;
 
 	if (nbytes < 0 || nbytes > 32) {
@@ -1096,7 +1157,7 @@ pmu_queue_request(struct adb_request *req)
 	unsigned long flags;
 	int nsend;
 
-	if (via == NULL) {
+	if (pmu_state == uninitialized) {
 		req->complete = 1;
 		return -ENXIO;
 	}
@@ -1136,7 +1197,7 @@ wait_for_ack(void)
 	 * reported
 	 */
 	int timeout = 4000;
-	while ((in_8(&via[B]) & TACK) == 0) {
+	while ((in_8(&via2[B]) & TACK) == 0) {
 		if (--timeout < 0) {
 			printk(KERN_ERR "PMU not responding (!ack)\n");
 			return;
@@ -1150,23 +1211,19 @@ wait_for_ack(void)
 static inline void
 send_byte(int x)
 {
-	volatile unsigned char __iomem *v = via;
-
-	out_8(&v[ACR], in_8(&v[ACR]) | SR_OUT | SR_EXT);
-	out_8(&v[SR], x);
-	out_8(&v[B], in_8(&v[B]) & ~TREQ);		/* assert TREQ */
-	(void)in_8(&v[B]);
+	out_8(&via1[ACR], in_8(&via1[ACR]) | SR_OUT | SR_EXT);
+	out_8(&via1[SR], x);
+	out_8(&via2[B], in_8(&via2[B]) & ~TREQ);	/* assert TREQ */
+	(void)in_8(&via2[B]);
 }
 
 static inline void
 recv_byte(void)
 {
-	volatile unsigned char __iomem *v = via;
-
-	out_8(&v[ACR], (in_8(&v[ACR]) & ~SR_OUT) | SR_EXT);
-	in_8(&v[SR]);		/* resets SR */
-	out_8(&v[B], in_8(&v[B]) & ~TREQ);
-	(void)in_8(&v[B]);
+	out_8(&via1[ACR], (in_8(&via1[ACR]) & ~SR_OUT) | SR_EXT);
+	in_8(&via1[SR]);	/* resets SR */
+	out_8(&via2[B], in_8(&via2[B]) & ~TREQ);
+	(void)in_8(&via2[B]);
 }
 
 static inline void
@@ -1209,7 +1266,7 @@ pmu_start(void)
 void
 pmu_poll(void)
 {
-	if (!via)
+	if (pmu_state == uninitialized)
 		return;
 	if (disable_poll)
 		return;
@@ -1219,7 +1276,7 @@ pmu_poll(void)
 void
 pmu_poll_adb(void)
 {
-	if (!via)
+	if (pmu_state == uninitialized)
 		return;
 	if (disable_poll)
 		return;
@@ -1234,7 +1291,7 @@ pmu_poll_adb(void)
 void
 pmu_wait_complete(struct adb_request *req)
 {
-	if (!via)
+	if (pmu_state == uninitialized)
 		return;
 	while((pmu_state != idle && pmu_state != locked) || !req->complete)
 		via_pmu_interrupt(0, NULL);
@@ -1250,7 +1307,7 @@ pmu_suspend(void)
 {
 	unsigned long flags;
 
-	if (!via)
+	if (pmu_state == uninitialized)
 		return;
 
 	spin_lock_irqsave(&pmu_lock, flags);
@@ -1269,7 +1326,7 @@ pmu_suspend(void)
 		if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
 			if (gpio_irq >= 0)
 				disable_irq_nosync(gpio_irq);
-			out_8(&via[IER], CB1_INT | IER_CLR);
+			out_8(&via1[IER], CB1_INT | IER_CLR);
 			spin_unlock_irqrestore(&pmu_lock, flags);
 			break;
 		}
@@ -1281,7 +1338,7 @@ pmu_resume(void)
 {
 	unsigned long flags;
 
-	if (!via || (pmu_suspended < 1))
+	if (pmu_state == uninitialized || pmu_suspended < 1)
 		return;
 
 	spin_lock_irqsave(&pmu_lock, flags);
@@ -1293,7 +1350,7 @@ pmu_resume(void)
 	adb_int_pending = 1;
 	if (gpio_irq >= 0)
 		enable_irq(gpio_irq);
-	out_8(&via[IER], CB1_INT | IER_SET);
+	out_8(&via1[IER], CB1_INT | IER_SET);
 	spin_unlock_irqrestore(&pmu_lock, flags);
 	pmu_poll();
 }
@@ -1302,7 +1359,8 @@ pmu_resume(void)
 static void
 pmu_handle_data(unsigned char *data, int len)
 {
-	unsigned char ints, pirq;
+	unsigned char ints;
+	int idx;
 	int i = 0;
 
 	asleep = 0;
@@ -1324,25 +1382,24 @@ pmu_handle_data(unsigned char *data, int len)
 		ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL);
 
 next:
-
 	if (ints == 0) {
 		if (i > pmu_irq_stats[10])
 			pmu_irq_stats[10] = i;
 		return;
 	}
-
-	for (pirq = 0; pirq < 8; pirq++)
-		if (ints & (1 << pirq))
-			break;
-	pmu_irq_stats[pirq]++;
 	i++;
-	ints &= ~(1 << pirq);
+
+	idx = ffs(ints) - 1;
+	ints &= ~BIT(idx);
+
+	pmu_irq_stats[idx]++;
 
 	/* Note: for some reason, we get an interrupt with len=1,
	 * data[0]==0 after each normal ADB interrupt, at least
	 * on the Pismo. Still investigating...  --BenH
	 */
-	if ((1 << pirq) & PMU_INT_ADB) {
+	switch (BIT(idx)) {
+	case PMU_INT_ADB:
 		if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
 			struct adb_request *req = req_awaiting_reply;
 			if (!req) {
@@ -1358,6 +1415,7 @@ next:
 			}
 			pmu_done(req);
 		} else {
+#ifdef CONFIG_XMON
 			if (len == 4 && data[1] == 0x2c) {
 				extern int xmon_wants_key, xmon_adb_keycode;
 				if (xmon_wants_key) {
@@ -1365,6 +1423,7 @@ next:
 					return;
 				}
 			}
+#endif /* CONFIG_XMON */
 #ifdef CONFIG_ADB
 			/*
 			 * XXX On the [23]400 the PMU gives us an up
@@ -1378,25 +1437,28 @@ next:
 			adb_input(data+1, len-1, 1);
 #endif /* CONFIG_ADB */
 		}
-	}
+		break;
+
 	/* Sound/brightness button pressed */
-	else if ((1 << pirq) & PMU_INT_SNDBRT) {
+	case PMU_INT_SNDBRT:
 #ifdef CONFIG_PMAC_BACKLIGHT
 		if (len == 3)
 			pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4);
 #endif
-	}
+		break;
+
 	/* Tick interrupt */
-	else if ((1 << pirq) & PMU_INT_TICK) {
-		/* Environement or tick interrupt, query batteries */
+	case PMU_INT_TICK:
+		/* Environment or tick interrupt, query batteries */
 		if (pmu_battery_count) {
 			if ((--query_batt_timer) == 0) {
 				query_battery_state();
 				query_batt_timer = BATTERY_POLLING_COUNT;
 			}
 		}
-	}
-	else if ((1 << pirq) & PMU_INT_ENVIRONMENT) {
+		break;
+
+	case PMU_INT_ENVIRONMENT:
 		if (pmu_battery_count)
 			query_battery_state();
 		pmu_pass_intr(data, len);
@@ -1406,7 +1468,9 @@ next:
 			via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
 			via_pmu_event(PMU_EVT_LID, data[1]&1);
 		}
-	} else {
+		break;
+
+	default:
 		pmu_pass_intr(data, len);
 	}
 	goto next;
@@ -1418,21 +1482,20 @@ pmu_sr_intr(void)
 	struct adb_request *req;
 	int bite = 0;
 
-	if (via[B] & TREQ) {
-		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
-		out_8(&via[IFR], SR_INT);
+	if (in_8(&via2[B]) & TREQ) {
+		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via2[B]));
 		return NULL;
 	}
 	/* The ack may not yet be low when we get the interrupt */
-	while ((in_8(&via[B]) & TACK) != 0)
+	while ((in_8(&via2[B]) & TACK) != 0)
 		;
 
 	/* if reading grab the byte, and reset the interrupt */
 	if (pmu_state == reading || pmu_state == reading_intr)
-		bite = in_8(&via[SR]);
+		bite = in_8(&via1[SR]);
 
 	/* reset TREQ and wait for TACK to go high */
-	out_8(&via[B], in_8(&via[B]) | TREQ);
+	out_8(&via2[B], in_8(&via2[B]) | TREQ);
 	wait_for_ack();
 
 	switch (pmu_state) {
@@ -1533,26 +1596,46 @@ via_pmu_interrupt(int irq, void *arg)
 	++disable_poll;
 
 	for (;;) {
-		intr = in_8(&via[IFR]) & (SR_INT | CB1_INT);
+		/* On 68k Macs, VIA interrupts are dispatched individually.
+		 * Unless we are polling, the relevant IRQ flag has already
+		 * been cleared.
+		 */
+		intr = 0;
+		if (IS_ENABLED(CONFIG_PPC_PMAC) || !irq) {
+			intr = in_8(&via1[IFR]) & (SR_INT | CB1_INT);
+			out_8(&via1[IFR], intr);
+		}
+#ifndef CONFIG_PPC_PMAC
+		switch (irq) {
+		case IRQ_MAC_ADB_CL:
+			intr = CB1_INT;
+			break;
+		case IRQ_MAC_ADB_SR:
+			intr = SR_INT;
+			break;
+		}
+#endif
 		if (intr == 0)
 			break;
 		handled = 1;
 		if (++nloop > 1000) {
 			printk(KERN_DEBUG "PMU: stuck in intr loop, "
 			       "intr=%x, ier=%x pmu_state=%d\n",
-			       intr, in_8(&via[IER]), pmu_state);
+			       intr, in_8(&via1[IER]), pmu_state);
 			break;
 		}
-		out_8(&via[IFR], intr);
 		if (intr & CB1_INT) {
 			adb_int_pending = 1;
-			pmu_irq_stats[0]++;
+			pmu_irq_stats[11]++;
 		}
 		if (intr & SR_INT) {
 			req = pmu_sr_intr();
 			if (req)
 				break;
 		}
+#ifndef CONFIG_PPC_PMAC
+		break;
+#endif
 	}
 
 recheck:
@@ -1619,7 +1702,7 @@ pmu_unlock(void)
 }
 
-static irqreturn_t
+static __maybe_unused irqreturn_t
 gpio1_interrupt(int irq, void *arg)
 {
 	unsigned long flags;
@@ -1630,7 +1713,7 @@ gpio1_interrupt(int irq, void *arg)
 			disable_irq_nosync(gpio_irq);
 			gpio_irq_enabled = 0;
 		}
-		pmu_irq_stats[1]++;
+		pmu_irq_stats[12]++;
 		adb_int_pending = 1;
 		spin_unlock_irqrestore(&pmu_lock, flags);
 		via_pmu_interrupt(0, NULL);
@@ -1644,7 +1727,7 @@ pmu_enable_irled(int on)
 {
 	struct adb_request req;
 
-	if (vias == NULL)
+	if (pmu_state == uninitialized)
 		return ;
 	if (pmu_kind == PMU_KEYLARGO_BASED)
 		return ;
@@ -1659,7 +1742,7 @@ pmu_restart(void)
 {
 	struct adb_request req;
 
-	if (via == NULL)
+	if (pmu_state == uninitialized)
 		return;
 
 	local_irq_disable();
@@ -1684,7 +1767,7 @@ pmu_shutdown(void)
 {
 	struct adb_request req;
 
-	if (via == NULL)
+	if (pmu_state == uninitialized)
 		return;
 
 	local_irq_disable();
@@ -1712,7 +1795,7 @@ pmu_shutdown(void)
 int
 pmu_present(void)
 {
-	return via != NULL;
+	return pmu_state != uninitialized;
 }
 
 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
@@ -1725,29 +1808,29 @@ static u32 save_via[8];
 static void
 save_via_state(void)
 {
-	save_via[0] = in_8(&via[ANH]);
-	save_via[1] = in_8(&via[DIRA]);
-	save_via[2] = in_8(&via[B]);
-	save_via[3] = in_8(&via[DIRB]);
-	save_via[4] = in_8(&via[PCR]);
-	save_via[5] = in_8(&via[ACR]);
-	save_via[6] = in_8(&via[T1CL]);
-	save_via[7] = in_8(&via[T1CH]);
+	save_via[0] = in_8(&via1[ANH]);
+	save_via[1] = in_8(&via1[DIRA]);
+	save_via[2] = in_8(&via1[B]);
+	save_via[3] = in_8(&via1[DIRB]);
+	save_via[4] = in_8(&via1[PCR]);
+	save_via[5] = in_8(&via1[ACR]);
+	save_via[6] = in_8(&via1[T1CL]);
+	save_via[7] = in_8(&via1[T1CH]);
 }
 static void
 restore_via_state(void)
 {
-	out_8(&via[ANH], save_via[0]);
-	out_8(&via[DIRA], save_via[1]);
-	out_8(&via[B], save_via[2]);
-	out_8(&via[DIRB], save_via[3]);
-	out_8(&via[PCR], save_via[4]);
-	out_8(&via[ACR], save_via[5]);
-	out_8(&via[T1CL], save_via[6]);
-	out_8(&via[T1CH], save_via[7]);
-	out_8(&via[IER], IER_CLR | 0x7f);	/* disable all intrs */
-	out_8(&via[IFR], 0x7f);			/* clear IFR */
-	out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
+	out_8(&via1[ANH], save_via[0]);
+	out_8(&via1[DIRA], save_via[1]);
+	out_8(&via1[B], save_via[2]);
+	out_8(&via1[DIRB], save_via[3]);
+	out_8(&via1[PCR], save_via[4]);
+	out_8(&via1[ACR], save_via[5]);
+	out_8(&via1[T1CL], save_via[6]);
+	out_8(&via1[T1CH], save_via[7]);
+	out_8(&via1[IER], IER_CLR | 0x7f);	/* disable all intrs */
+	out_8(&via1[IFR], 0x7f);		/* clear IFR */
+	out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
 }
 
 #define GRACKLE_PM	(1<<7)
@@ -2253,6 +2336,7 @@ static int pmu_ioctl(struct file *filp,
 	int error = -EINVAL;
 
 	switch (cmd) {
+#ifdef CONFIG_PPC_PMAC
 	case PMU_IOC_SLEEP:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
@@ -2262,6 +2346,7 @@ static int pmu_ioctl(struct file *filp,
 			return put_user(0, argp);
 		else
 			return put_user(1, argp);
+#endif
 
 #ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
 	/* Compatibility ioctl's for backlight */
@@ -2378,7 +2463,7 @@ static struct miscdevice pmu_device = {
 
 static int pmu_device_init(void)
 {
-	if (!via)
+	if (pmu_state == uninitialized)
 		return 0;
 	if (misc_register(&pmu_device) < 0)
 		printk(KERN_ERR "via-pmu: cannot register misc device.\n");
@@ -2389,33 +2474,33 @@ device_initcall(pmu_device_init);
 
 #ifdef DEBUG_SLEEP
 static inline void
-polled_handshake(volatile unsigned char __iomem *via)
+polled_handshake(void)
 {
-	via[B] &= ~TREQ; eieio();
-	while ((via[B] & TACK) != 0)
+	via2[B] &= ~TREQ; eieio();
+	while ((via2[B] & TACK) != 0)
 		;
-	via[B] |= TREQ; eieio();
-	while ((via[B] & TACK) == 0)
+	via2[B] |= TREQ; eieio();
+	while ((via2[B] & TACK) == 0)
 		;
 }
 
 static inline void
-polled_send_byte(volatile unsigned char __iomem *via, int x)
+polled_send_byte(int x)
 {
-	via[ACR] |= SR_OUT | SR_EXT; eieio();
-	via[SR] = x; eieio();
-	polled_handshake(via);
+	via1[ACR] |= SR_OUT | SR_EXT; eieio();
+	via1[SR] = x; eieio();
+	polled_handshake();
 }
 
 static inline int
-polled_recv_byte(volatile unsigned char __iomem *via)
+polled_recv_byte(void)
 {
 	int x;
 
-	via[ACR] = (via[ACR] & ~SR_OUT) | SR_EXT; eieio();
-	x = via[SR]; eieio();
-	polled_handshake(via);
-	x = via[SR]; eieio();
+	via1[ACR] = (via1[ACR] & ~SR_OUT) | SR_EXT; eieio();
+	x = via1[SR]; eieio();
+	polled_handshake();
+	x = via1[SR]; eieio();
 	return x;
 }
@@ -2424,7 +2509,6 @@ pmu_polled_request(struct adb_request *req)
 {
 	unsigned long flags;
 	int i, l, c;
-	volatile unsigned char __iomem *v = via;
 
 	req->complete = 1;
 	c = req->data[0];
@@ -2436,21 +2520,21 @@ pmu_polled_request(struct adb_request *req)
 
 	while (pmu_state != idle)
 		pmu_poll();
 
-	while ((via[B] & TACK) == 0)
+	while ((via2[B] & TACK) == 0)
 		;
-	polled_send_byte(v, c);
+	polled_send_byte(c);
 	if (l < 0) {
 		l = req->nbytes - 1;
-		polled_send_byte(v, l);
+		polled_send_byte(l);
 	}
 	for (i = 1; i <= l; ++i)
-		polled_send_byte(v, req->data[i]);
+		polled_send_byte(req->data[i]);
 
 	l = pmu_data_len[c][1];
 	if (l < 0)
-		l = polled_recv_byte(v);
+		l = polled_recv_byte();
 	for (i = 0; i < l; ++i)
-		req->reply[i + req->reply_len] = polled_recv_byte(v);
+		req->reply[i + req->reply_len] = polled_recv_byte();
 
 	if (req->done)
 		(*req->done)(req);
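A compileable model of the interrupt-accounting rework in pmu_handle_data() above: rather than scanning bits with a for loop, take the lowest set bit with ffs(), clear it, bump a per-bit counter, and switch on the bit value. The PMU_INT_* values mirror those in include/linux/pmu.h; the handlers are reduced to prints:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define PMU_INT_PCEJECT	0x04
#define PMU_INT_SNDBRT	0x08
#define PMU_INT_ADB	0x10
#define PMU_INT_TICK	0x80

static unsigned int irq_stats[8];	/* one slot per interrupt bit */

static void handle_ints(unsigned int ints)
{
	while (ints) {
		int idx = ffs(ints) - 1;	/* lowest set bit, as in the hunk */

		ints &= ~(1u << idx);
		irq_stats[idx]++;

		switch (1u << idx) {
		case PMU_INT_ADB:
			puts("ADB message");
			break;
		case PMU_INT_TICK:
			puts("tick timer");
			break;
		default:
			puts("other PMU event");
		}
	}
}

int main(void)
{
	handle_ints(PMU_INT_ADB | PMU_INT_TICK);
	return 0;
}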
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
deleted file mode 100644
index d545ed45e482..000000000000
--- a/drivers/macintosh/via-pmu68k.c
+++ /dev/null
@@ -1,850 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device driver for the PMU on 68K-based Apple PowerBooks
- *
- * The VIA (versatile interface adapter) interfaces to the PMU,
- * a 6805 microprocessor core whose primary function is to control
- * battery charging and system power on the PowerBooks.
- * The PMU also controls the ADB (Apple Desktop Bus) which connects
- * to the keyboard and mouse, as well as the non-volatile RAM
- * and the RTC (real time clock) chip.
- *
- * Adapted for 68K PMU by Joshua M. Thompson
- *
- * Based largely on the PowerMac PMU code by Paul Mackerras and
- * Fabio Riccardi.
- *
- * Also based on the PMU driver from MkLinux by Apple Computer, Inc.
- * and the Open Software Foundation, Inc.
- */
-
-#include <stdarg.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/miscdevice.h>
-#include <linux/blkdev.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/cuda.h>
-
-#include <asm/macintosh.h>
-#include <asm/macints.h>
-#include <asm/mac_via.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-
-/* Misc minor number allocated for /dev/pmu */
-#define PMU_MINOR	154
-
-/* VIA registers - spaced 0x200 bytes apart */
-#define RS		0x200		/* skip between registers */
-#define B		0		/* B-side data */
-#define A		RS		/* A-side data */
-#define DIRB		(2*RS)		/* B-side direction (1=output) */
-#define DIRA		(3*RS)		/* A-side direction (1=output) */
-#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
-#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
-#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
-#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
-#define T2CL		(8*RS)		/* Timer 2 ctr/latch (low 8 bits) */
-#define T2CH		(9*RS)		/* Timer 2 counter (high 8 bits) */
-#define SR		(10*RS)		/* Shift register */
-#define ACR		(11*RS)		/* Auxiliary control register */
-#define PCR		(12*RS)		/* Peripheral control register */
-#define IFR		(13*RS)		/* Interrupt flag register */
-#define IER		(14*RS)		/* Interrupt enable register */
-#define ANH		(15*RS)		/* A-side data, no handshake */
-
-/* Bits in B data register: both active low */
-#define TACK		0x02		/* Transfer acknowledge (input) */
-#define TREQ		0x04		/* Transfer request (output) */
-
-/* Bits in ACR */
-#define SR_CTRL		0x1c		/* Shift register control bits */
-#define SR_EXT		0x0c		/* Shift on external clock */
-#define SR_OUT		0x10		/* Shift out if 1 */
-
-/* Bits in IFR and IER */
-#define SR_INT		0x04		/* Shift register full/empty */
-#define CB1_INT		0x10		/* transition on CB1 input */
-
-static enum pmu_state {
-	idle,
-	sending,
-	intack,
-	reading,
-	reading_intr,
-} pmu_state;
-
-static struct adb_request *current_req;
-static struct adb_request *last_req;
-static struct adb_request *req_awaiting_reply;
-static unsigned char interrupt_data[32];
-static unsigned char *reply_ptr;
-static int data_index;
-static int data_len;
-static int adb_int_pending;
-static int pmu_adb_flags;
-static int adb_dev_map;
-static struct adb_request bright_req_1, bright_req_2, bright_req_3;
-static int pmu_kind = PMU_UNKNOWN;
-static int pmu_fully_inited;
-
-int asleep;
-
-static int pmu_probe(void);
-static int pmu_init(void);
-static void pmu_start(void);
-static irqreturn_t pmu_interrupt(int irq, void *arg);
-static int pmu_send_request(struct adb_request *req, int sync);
-static int pmu_autopoll(int devs);
-void pmu_poll(void);
-static int pmu_reset_bus(void);
-
-static int init_pmu(void);
-static void pmu_start(void);
-static void send_byte(int x);
-static void recv_byte(void);
-static void pmu_done(struct adb_request *req);
-static void pmu_handle_data(unsigned char *data, int len);
-static void set_volume(int level);
-static void pmu_enable_backlight(int on);
-static void pmu_set_brightness(int level);
-
-struct adb_driver via_pmu_driver = {
-	.name = "68K PMU",
-	.probe = pmu_probe,
-	.init = pmu_init,
-	.send_request = pmu_send_request,
-	.autopoll = pmu_autopoll,
-	.poll = pmu_poll,
-	.reset_bus = pmu_reset_bus,
-};
-
-/*
- * This table indicates for each PMU opcode:
- * - the number of data bytes to be sent with the command, or -1
- *   if a length byte should be sent,
- * - the number of response bytes which the PMU will return, or
- *   -1 if it will send a length byte.
- */
-static s8 pmu_data_len[256][2] = {
-/*	   0	   1	   2	   3	   4	   5	   6	   7  */
-/*00*/	{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*08*/	{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*10*/	{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*18*/	{ 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0},
-/*20*/	{-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*28*/	{ 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1},
-/*30*/	{ 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*38*/	{ 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0},
-/*40*/	{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*48*/	{ 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1},
-/*50*/	{ 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0},
-/*58*/	{ 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},
-/*60*/	{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*68*/	{ 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1},
-/*70*/	{ 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*78*/	{ 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1},
-/*80*/	{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*88*/	{ 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*90*/	{ 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*98*/	{ 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*a0*/	{ 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},
-/*a8*/	{ 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*b0*/	{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*b8*/	{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*c0*/	{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*c8*/	{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-/*d0*/	{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*d8*/	{ 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1},
-/*e0*/	{-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0},
-/*e8*/	{ 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0},
-/*f0*/	{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
-/*f8*/	{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
-};
-
-int __init find_via_pmu(void)
-{
-	switch (macintosh_config->adb_type) {
-	case MAC_ADB_PB1:
-		pmu_kind = PMU_68K_V1;
-		break;
-	case MAC_ADB_PB2:
-		pmu_kind = PMU_68K_V2;
-		break;
-	default:
-		pmu_kind = PMU_UNKNOWN;
-		return -ENODEV;
-	}
-
-	pmu_state = idle;
-
-	if (!init_pmu())
-		goto fail_init;
-
-	pr_info("adb: PMU 68K driver v0.5 for Unified ADB\n");
-
-	return 1;
-
-fail_init:
-	pmu_kind = PMU_UNKNOWN;
-	return 0;
-}
-
-static int pmu_probe(void)
-{
-	if (pmu_kind == PMU_UNKNOWN)
-		return -ENODEV;
-	return 0;
-}
-
-static int pmu_init(void)
-{
-	if (pmu_kind == PMU_UNKNOWN)
-		return -ENODEV;
-	return 0;
-}
-
-static int __init via_pmu_start(void)
-{
-	if (pmu_kind == PMU_UNKNOWN)
-		return -ENODEV;
-
-	if (request_irq(IRQ_MAC_ADB_SR, pmu_interrupt, 0, "PMU_SR",
-			pmu_interrupt)) {
-		pr_err("%s: can't get SR irq\n", __func__);
-		return -ENODEV;
-	}
-	if (request_irq(IRQ_MAC_ADB_CL, pmu_interrupt, 0, "PMU_CL",
-			pmu_interrupt)) {
-		pr_err("%s: can't get CL irq\n", __func__);
-		free_irq(IRQ_MAC_ADB_SR, pmu_interrupt);
-		return -ENODEV;
-	}
-
-	pmu_fully_inited = 1;
-
-	/* Enable backlight */
-	pmu_enable_backlight(1);
-
-	return 0;
-}
-
-arch_initcall(via_pmu_start);
-
-static int __init init_pmu(void)
-{
-	int timeout;
-	volatile struct adb_request req;
-
-	via2[B] |= TREQ;				/* negate TREQ */
-	via2[DIRB] = (via2[DIRB] | TREQ) & ~TACK;	/* TACK in, TREQ out */
-
-	pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB);
-	timeout = 100000;
-	while (!req.complete) {
-		if (--timeout < 0) {
-			printk(KERN_ERR "pmu_init: no response from PMU\n");
-			return -EAGAIN;
-		}
-		udelay(10);
-		pmu_poll();
-	}
-
-	/* ack all pending interrupts */
-	timeout = 100000;
-	interrupt_data[0] = 1;
-	while (interrupt_data[0] || pmu_state != idle) {
-		if (--timeout < 0) {
-			printk(KERN_ERR "pmu_init: timed out acking intrs\n");
-			return -EAGAIN;
-		}
-		if (pmu_state == idle) {
-			adb_int_pending = 1;
-			pmu_interrupt(0, NULL);
-		}
-		pmu_poll();
-		udelay(10);
-	}
-
-	pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK,
-			PMU_INT_ADB_AUTO|PMU_INT_SNDBRT|PMU_INT_ADB);
-	timeout = 100000;
-	while (!req.complete) {
-		if (--timeout < 0) {
-			printk(KERN_ERR "pmu_init: no response from PMU\n");
-			return -EAGAIN;
-		}
-		udelay(10);
-		pmu_poll();
-	}
-
-	bright_req_1.complete = 1;
-	bright_req_2.complete = 1;
-	bright_req_3.complete = 1;
-
-	return 1;
-}
-
-int
-pmu_get_model(void)
-{
-	return pmu_kind;
-}
-
-/* Send an ADB command */
-static int
-pmu_send_request(struct adb_request *req, int sync)
-{
-	int i, ret;
-
-	if (!pmu_fully_inited)
-	{
-		req->complete = 1;
-		return -ENXIO;
-	}
-
-	ret = -EINVAL;
-
-	switch (req->data[0]) {
-	case PMU_PACKET:
-		for (i = 0; i < req->nbytes - 1; ++i)
-			req->data[i] = req->data[i+1];
-		--req->nbytes;
-		if (pmu_data_len[req->data[0]][1] != 0) {
-			req->reply[0] = ADB_RET_OK;
-			req->reply_len = 1;
-		} else
-			req->reply_len = 0;
-		ret = pmu_queue_request(req);
-		break;
-	case CUDA_PACKET:
-		switch (req->data[1]) {
-		case CUDA_GET_TIME:
-			if (req->nbytes != 2)
-				break;
-			req->data[0] = PMU_READ_RTC;
-			req->nbytes = 1;
-			req->reply_len = 3;
-			req->reply[0] = CUDA_PACKET;
-			req->reply[1] = 0;
-			req->reply[2] = CUDA_GET_TIME;
-			ret = pmu_queue_request(req);
-			break;
-		case CUDA_SET_TIME:
-			if (req->nbytes != 6)
-				break;
-			req->data[0] = PMU_SET_RTC;
-			req->nbytes = 5;
-			for (i = 1; i <= 4; ++i)
-				req->data[i] = req->data[i+1];
-			req->reply_len = 3;
-			req->reply[0] = CUDA_PACKET;
-			req->reply[1] = 0;
-			req->reply[2] = CUDA_SET_TIME;
-			ret = pmu_queue_request(req);
-			break;
-		case CUDA_GET_PRAM:
-			if (req->nbytes != 4)
-				break;
-			req->data[0] = PMU_READ_NVRAM;
-			req->data[1] = req->data[2];
-			req->data[2] = req->data[3];
-			req->nbytes = 3;
-			req->reply_len = 3;
-			req->reply[0] = CUDA_PACKET;
-			req->reply[1] = 0;
-			req->reply[2] = CUDA_GET_PRAM;
-			ret = pmu_queue_request(req);
-			break;
-		case CUDA_SET_PRAM:
-			if (req->nbytes != 5)
-				break;
-			req->data[0] = PMU_WRITE_NVRAM;
-			req->data[1] = req->data[2];
-			req->data[2] = req->data[3];
-			req->data[3] = req->data[4];
-			req->nbytes = 4;
-			req->reply_len = 3;
-			req->reply[0] = CUDA_PACKET;
-			req->reply[1] = 0;
-			req->reply[2] = CUDA_SET_PRAM;
-			ret = pmu_queue_request(req);
-			break;
-		}
-		break;
-	case ADB_PACKET:
-		for (i = req->nbytes - 1; i > 1; --i)
-			req->data[i+2] = req->data[i];
-		req->data[3] = req->nbytes - 2;
-		req->data[2] = pmu_adb_flags;
-		/*req->data[1] = req->data[1];*/
-		req->data[0] = PMU_ADB_CMD;
-		req->nbytes += 2;
-		req->reply_expected = 1;
-		req->reply_len = 0;
-		ret = pmu_queue_request(req);
-		break;
-	}
-
if (ret) - { - req->complete = 1; - return ret; - } - - if (sync) { - while (!req->complete) - pmu_poll(); - } - - return 0; -} - -/* Enable/disable autopolling */ -static int -pmu_autopoll(int devs) -{ - struct adb_request req; - - if (!pmu_fully_inited) return -ENXIO; - - if (devs) { - adb_dev_map = devs; - pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, - adb_dev_map >> 8, adb_dev_map); - pmu_adb_flags = 2; - } else { - pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); - pmu_adb_flags = 0; - } - while (!req.complete) - pmu_poll(); - return 0; -} - -/* Reset the ADB bus */ -static int -pmu_reset_bus(void) -{ - struct adb_request req; - long timeout; - int save_autopoll = adb_dev_map; - - if (!pmu_fully_inited) return -ENXIO; - - /* anyone got a better idea?? */ - pmu_autopoll(0); - - req.nbytes = 5; - req.done = NULL; - req.data[0] = PMU_ADB_CMD; - req.data[1] = 0; - req.data[2] = 3; /* ADB_BUSRESET ??? */ - req.data[3] = 0; - req.data[4] = 0; - req.reply_len = 0; - req.reply_expected = 1; - if (pmu_queue_request(&req) != 0) - { - printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n"); - return -EIO; - } - while (!req.complete) - pmu_poll(); - timeout = 100000; - while (!req.complete) { - if (--timeout < 0) { - printk(KERN_ERR "pmu_adb_reset_bus (reset): no response from PMU\n"); - return -EIO; - } - udelay(10); - pmu_poll(); - } - - if (save_autopoll != 0) - pmu_autopoll(save_autopoll); - - return 0; -} - -/* Construct and send a pmu request */ -int -pmu_request(struct adb_request *req, void (*done)(struct adb_request *), - int nbytes, ...) -{ - va_list list; - int i; - - if (nbytes < 0 || nbytes > 32) { - printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes); - req->complete = 1; - return -EINVAL; - } - req->nbytes = nbytes; - req->done = done; - va_start(list, nbytes); - for (i = 0; i < nbytes; ++i) - req->data[i] = va_arg(list, int); - va_end(list); - if (pmu_data_len[req->data[0]][1] != 0) { - req->reply[0] = ADB_RET_OK; - req->reply_len = 1; - } else - req->reply_len = 0; - req->reply_expected = 0; - return pmu_queue_request(req); -} - -int -pmu_queue_request(struct adb_request *req) -{ - unsigned long flags; - int nsend; - - if (req->nbytes <= 0) { - req->complete = 1; - return 0; - } - nsend = pmu_data_len[req->data[0]][0]; - if (nsend >= 0 && req->nbytes != nsend + 1) { - req->complete = 1; - return -EINVAL; - } - - req->next = NULL; - req->sent = 0; - req->complete = 0; - local_irq_save(flags); - - if (current_req != 0) { - last_req->next = req; - last_req = req; - } else { - current_req = req; - last_req = req; - if (pmu_state == idle) - pmu_start(); - } - - local_irq_restore(flags); - return 0; -} - -static void -send_byte(int x) -{ - via1[ACR] |= SR_CTRL; - via1[SR] = x; - via2[B] &= ~TREQ; /* assert TREQ */ -} - -static void -recv_byte(void) -{ - char c; - - via1[ACR] = (via1[ACR] | SR_EXT) & ~SR_OUT; - c = via1[SR]; /* resets SR */ - via2[B] &= ~TREQ; -} - -static void -pmu_start(void) -{ - unsigned long flags; - struct adb_request *req; - - /* assert pmu_state == idle */ - /* get the packet to send */ - local_irq_save(flags); - req = current_req; - if (req == 0 || pmu_state != idle - || (req->reply_expected && req_awaiting_reply)) - goto out; - - pmu_state = sending; - data_index = 1; - data_len = pmu_data_len[req->data[0]][0]; - - /* set the shift register to shift out and send a byte */ - send_byte(req->data[0]); - -out: - local_irq_restore(flags); -} - -void -pmu_poll(void) -{ - unsigned long flags; - - local_irq_save(flags); - if (via1[IFR] & SR_INT) { - 
via1[IFR] = SR_INT; - pmu_interrupt(IRQ_MAC_ADB_SR, NULL); - } - if (via1[IFR] & CB1_INT) { - via1[IFR] = CB1_INT; - pmu_interrupt(IRQ_MAC_ADB_CL, NULL); - } - local_irq_restore(flags); -} - -static irqreturn_t -pmu_interrupt(int irq, void *dev_id) -{ - struct adb_request *req; - int timeout, bite = 0; /* to prevent compiler warning */ - -#if 0 - printk("pmu_interrupt: irq %d state %d acr %02X, b %02X data_index %d/%d adb_int_pending %d\n", - irq, pmu_state, (uint) via1[ACR], (uint) via2[B], data_index, data_len, adb_int_pending); -#endif - - if (irq == IRQ_MAC_ADB_CL) { /* CB1 interrupt */ - adb_int_pending = 1; - } else if (irq == IRQ_MAC_ADB_SR) { /* SR interrupt */ - if (via2[B] & TACK) { - printk(KERN_DEBUG "PMU: SR_INT but ack still high! (%x)\n", via2[B]); - } - - /* if reading grab the byte */ - if ((via1[ACR] & SR_OUT) == 0) bite = via1[SR]; - - /* reset TREQ and wait for TACK to go high */ - via2[B] |= TREQ; - timeout = 3200; - while (!(via2[B] & TACK)) { - if (--timeout < 0) { - printk(KERN_ERR "PMU not responding (!ack)\n"); - goto finish; - } - udelay(10); - } - - switch (pmu_state) { - case sending: - req = current_req; - if (data_len < 0) { - data_len = req->nbytes - 1; - send_byte(data_len); - break; - } - if (data_index <= data_len) { - send_byte(req->data[data_index++]); - break; - } - req->sent = 1; - data_len = pmu_data_len[req->data[0]][1]; - if (data_len == 0) { - pmu_state = idle; - current_req = req->next; - if (req->reply_expected) - req_awaiting_reply = req; - else - pmu_done(req); - } else { - pmu_state = reading; - data_index = 0; - reply_ptr = req->reply + req->reply_len; - recv_byte(); - } - break; - - case intack: - data_index = 0; - data_len = -1; - pmu_state = reading_intr; - reply_ptr = interrupt_data; - recv_byte(); - break; - - case reading: - case reading_intr: - if (data_len == -1) { - data_len = bite; - if (bite > 32) - printk(KERN_ERR "PMU: bad reply len %d\n", - bite); - } else { - reply_ptr[data_index++] = bite; - } - if (data_index < data_len) { - recv_byte(); - break; - } - - if (pmu_state == reading_intr) { - pmu_handle_data(interrupt_data, data_index); - } else { - req = current_req; - current_req = req->next; - req->reply_len += data_index; - pmu_done(req); - } - pmu_state = idle; - - break; - - default: - printk(KERN_ERR "pmu_interrupt: unknown state %d?\n", - pmu_state); - } - } -finish: - if (pmu_state == idle) { - if (adb_int_pending) { - pmu_state = intack; - send_byte(PMU_INT_ACK); - adb_int_pending = 0; - } else if (current_req) { - pmu_start(); - } - } - -#if 0 - printk("pmu_interrupt: exit state %d acr %02X, b %02X data_index %d/%d adb_int_pending %d\n", - pmu_state, (uint) via1[ACR], (uint) via2[B], data_index, data_len, adb_int_pending); -#endif - return IRQ_HANDLED; -} - -static void -pmu_done(struct adb_request *req) -{ - req->complete = 1; - if (req->done) - (*req->done)(req); -} - -/* Interrupt data could be the result data from an ADB cmd */ -static void -pmu_handle_data(unsigned char *data, int len) -{ - static int show_pmu_ints = 1; - - asleep = 0; - if (len < 1) { - adb_int_pending = 0; - return; - } - if (data[0] & PMU_INT_ADB) { - if ((data[0] & PMU_INT_ADB_AUTO) == 0) { - struct adb_request *req = req_awaiting_reply; - if (req == 0) { - printk(KERN_ERR "PMU: extra ADB reply\n"); - return; - } - req_awaiting_reply = NULL; - if (len <= 2) - req->reply_len = 0; - else { - memcpy(req->reply, data + 1, len - 1); - req->reply_len = len - 1; - } - pmu_done(req); - } else { - adb_input(data+1, len-1, 1); - } - } else { - if 
(data[0] == 0x08 && len == 3) { - /* sound/brightness buttons pressed */ - pmu_set_brightness(data[1] >> 3); - set_volume(data[2]); - } else if (show_pmu_ints - && !(data[0] == PMU_INT_TICK && len == 1)) { - int i; - printk(KERN_DEBUG "pmu intr"); - for (i = 0; i < len; ++i) - printk(" %.2x", data[i]); - printk("\n"); - } - } -} - -static int backlight_level = -1; -static int backlight_enabled = 0; - -#define LEVEL_TO_BRIGHT(lev) ((lev) < 1? 0x7f: 0x4a - ((lev) << 1)) - -static void -pmu_enable_backlight(int on) -{ - struct adb_request req; - - if (on) { - /* first call: get current backlight value */ - if (backlight_level < 0) { - switch(pmu_kind) { - case PMU_68K_V1: - case PMU_68K_V2: - pmu_request(&req, NULL, 3, PMU_READ_NVRAM, 0x14, 0xe); - while (!req.complete) - pmu_poll(); - printk(KERN_DEBUG "pmu: nvram returned bright: %d\n", (int)req.reply[1]); - backlight_level = req.reply[1]; - break; - default: - backlight_enabled = 0; - return; - } - } - pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, - LEVEL_TO_BRIGHT(backlight_level)); - while (!req.complete) - pmu_poll(); - } - pmu_request(&req, NULL, 2, PMU_POWER_CTRL, - PMU_POW_BACKLIGHT | (on ? PMU_POW_ON : PMU_POW_OFF)); - while (!req.complete) - pmu_poll(); - backlight_enabled = on; -} - -static void -pmu_set_brightness(int level) -{ - int bright; - - backlight_level = level; - bright = LEVEL_TO_BRIGHT(level); - if (!backlight_enabled) - return; - if (bright_req_1.complete) - pmu_request(&bright_req_1, NULL, 2, PMU_BACKLIGHT_BRIGHT, - bright); - if (bright_req_2.complete) - pmu_request(&bright_req_2, NULL, 2, PMU_POWER_CTRL, - PMU_POW_BACKLIGHT | (bright < 0x7f ? PMU_POW_ON : PMU_POW_OFF)); -} - -void -pmu_enable_irled(int on) -{ - struct adb_request req; - - pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | - (on ? PMU_POW_ON : PMU_POW_OFF)); - while (!req.complete) - pmu_poll(); -} - -static void -set_volume(int level) -{ -} - -int -pmu_present(void) -{ - return (pmu_kind != PMU_UNKNOWN); -} diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig index 93397cb05b15..3ce933707828 100644 --- a/drivers/misc/cxl/Kconfig +++ b/drivers/misc/cxl/Kconfig @@ -33,11 +33,3 @@ config CXL CAPI adapters are found in POWER8 based systems. If unsure, say N. - -config CXL_BIMODAL - bool "Support for bi-modal CAPI cards" - depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m - default y - help - Select this option to enable support for bi-modal CAPI cards, such as - the Mellanox CX-4. 
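The 68k PMU driver deleted above keys everything off the pmu_data_len table: for each command byte, column 0 is the number of parameter bytes the host sends after the command (-1 meaning a length byte is sent first), and column 1 is the number of reply bytes the PMU returns (-1 again meaning a leading length byte). pmu_queue_request() uses column 0 to reject malformed fixed-length commands before they touch the wire. Below is a minimal standalone sketch of that validation pattern; cmd_len and validate_request are illustrative names for this note, not kernel API, and the four-entry table stands in for the driver's 256-entry one.

#include <stdio.h>

/* Per-command metadata in the style of pmu_data_len[]:
 * [0] = parameter bytes sent after the command byte, or -1 if a
 *       length byte is transmitted instead;
 * [1] = reply bytes returned by the PMU, or -1 if the PMU sends a
 *       length byte first.
 */
static const signed char cmd_len[4][2] = {
	{  1,  0 },	/* cmd 0: one parameter, no reply */
	{  0,  1 },	/* cmd 1: no parameters, one reply byte */
	{ -1,  0 },	/* cmd 2: variable-length send */
	{  0, -1 },	/* cmd 3: variable-length reply */
};

/* Mirrors the fixed-length check in pmu_queue_request(): a command
 * whose send column is >= 0 must carry exactly that many parameters. */
static int validate_request(const unsigned char *data, int nbytes)
{
	int nsend;

	if (nbytes <= 0 || data[0] >= 4)
		return -1;
	nsend = cmd_len[data[0]][0];
	if (nsend >= 0 && nbytes != nsend + 1)
		return -1;	/* wrong parameter count for this command */
	return 0;
}

int main(void)
{
	unsigned char ok[]  = { 0, 0x42 };	/* cmd 0 plus its one parameter */
	unsigned char bad[] = { 1, 0x42 };	/* cmd 1 takes no parameters */

	printf("ok=%d bad=%d\n",
	       validate_request(ok, 2), validate_request(bad, 2));
	return 0;
}

The reply column drives the receive side of the state machine the same way: in pmu_interrupt() a zero entry completes the request as soon as the last byte is shifted out, while -1 makes the first byte read back be interpreted as the length of the reply that follows.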
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile index 502d41fc9ea5..5eea61b9584f 100644 --- a/drivers/misc/cxl/Makefile +++ b/drivers/misc/cxl/Makefile @@ -4,7 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror cxl-y += main.o file.o irq.o fault.o native.o cxl-y += context.o sysfs.o pci.o trace.o -cxl-y += vphb.o phb.o api.o cxllib.o +cxl-y += vphb.o api.o cxllib.o cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o cxl-$(CONFIG_DEBUG_FS) += debugfs.o obj-$(CONFIG_CXL) += cxl.o diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 8fd5ec4d6042..750470ef2049 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -11,7 +11,6 @@ #include <linux/slab.h> #include <linux/file.h> #include <misc/cxl.h> -#include <linux/msi.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/sched/mm.h> @@ -168,21 +167,6 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) return 0; } -int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq) -{ - if (*ctx == NULL || *afu_irq == 0) { - *afu_irq = 1; - *ctx = cxl_get_context(pdev); - } else { - (*afu_irq)++; - if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) { - *ctx = list_next_entry(*ctx, extra_irq_contexts); - *afu_irq = 1; - } - } - return cxl_find_afu_irq(*ctx, *afu_irq); -} -/* Exported via cxl_base */ int cxl_set_priv(struct cxl_context *ctx, void *priv) { @@ -310,7 +294,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, if (task) { ctx->pid = get_task_pid(task, PIDTYPE_PID); kernel = false; - ctx->real_mode = false; /* acquire a reference to the task's mm */ ctx->mm = get_task_mm(current); @@ -374,24 +357,6 @@ void cxl_set_master(struct cxl_context *ctx) } EXPORT_SYMBOL_GPL(cxl_set_master); -int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode) -{ - if (ctx->status == STARTED) { - /* - * We could potentially update the PE and issue an update LLCMD - * to support this, but it doesn't seem to have a good use case - * since it's trivial to just create a second kernel context - * with different translation modes, so until someone convinces - * me otherwise: - */ - return -EBUSY; - } - - ctx->real_mode = real_mode; - return 0; -} -EXPORT_SYMBOL_GPL(cxl_set_translation_mode); - /* wrappers around afu_* file ops which are EXPORTED */ int cxl_fd_open(struct inode *inode, struct file *file) { @@ -573,100 +538,3 @@ ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); } EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd); - -int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs) -{ - struct cxl_afu *afu = cxl_pci_to_afu(dev); - if (IS_ERR(afu)) - return -ENODEV; - - if (irqs > afu->adapter->user_irqs) - return -EINVAL; - - /* Limit user_irqs to prevent the user increasing this via sysfs */ - afu->adapter->user_irqs = irqs; - afu->irqs_max = irqs; - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process); - -int cxl_get_max_irqs_per_process(struct pci_dev *dev) -{ - struct cxl_afu *afu = cxl_pci_to_afu(dev); - if (IS_ERR(afu)) - return -ENODEV; - - return afu->irqs_max; -} -EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process); - -/* - * This is a special interrupt allocation routine called from the PHB's MSI - * setup function. When capi interrupts are allocated in this manner they must - * still be associated with a running context, but since the MSI APIs have no - * way to specify this we use the default context associated with the device. 
- * - * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU - * interrupt number, so in order to overcome this their driver informs us of - * the restriction by setting the maximum interrupts per context, and we - * allocate additional contexts as necessary so that we can keep the AFU - * interrupt number within the supported range. - */ -int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) -{ - struct cxl_context *ctx, *new_ctx, *default_ctx; - int remaining; - int rc; - - ctx = default_ctx = cxl_get_context(pdev); - if (WARN_ON(!default_ctx)) - return -ENODEV; - - remaining = nvec; - while (remaining > 0) { - rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max)); - if (rc) { - pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev)); - return rc; - } - remaining -= ctx->afu->irqs_max; - - if (ctx != default_ctx && default_ctx->status == STARTED) { - WARN_ON(cxl_start_context(ctx, - be64_to_cpu(default_ctx->elem->common.wed), - NULL)); - } - - if (remaining > 0) { - new_ctx = cxl_dev_context_init(pdev); - if (IS_ERR(new_ctx)) { - pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev)); - return -ENOSPC; - } - list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts); - ctx = new_ctx; - } - } - - return 0; -} -/* Exported via cxl_base */ - -void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) -{ - struct cxl_context *ctx, *pos, *tmp; - - ctx = cxl_get_context(pdev); - if (WARN_ON(!ctx)) - return; - - cxl_free_afu_irqs(ctx); - list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) { - cxl_stop_context(pos); - cxl_free_afu_irqs(pos); - list_del(&pos->extra_irq_contexts); - cxl_release_context(pos); - } -} -/* Exported via cxl_base */ diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c index cd54ce6f6230..7557835cdfcd 100644 --- a/drivers/misc/cxl/base.c +++ b/drivers/misc/cxl/base.c @@ -106,89 +106,6 @@ int cxl_update_properties(struct device_node *dn, } EXPORT_SYMBOL_GPL(cxl_update_properties); -/* - * API calls into the driver that may be called from the PHB code and must be - * built in. 
- */ -bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) -{ - bool ret; - struct cxl_calls *calls; - - calls = cxl_calls_get(); - if (!calls) - return false; - - ret = calls->cxl_pci_associate_default_context(dev, afu); - - cxl_calls_put(calls); - - return ret; -} -EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context); - -void cxl_pci_disable_device(struct pci_dev *dev) -{ - struct cxl_calls *calls; - - calls = cxl_calls_get(); - if (!calls) - return; - - calls->cxl_pci_disable_device(dev); - - cxl_calls_put(calls); -} -EXPORT_SYMBOL_GPL(cxl_pci_disable_device); - -int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq) -{ - int ret; - struct cxl_calls *calls; - - calls = cxl_calls_get(); - if (!calls) - return -EBUSY; - - ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq); - - cxl_calls_put(calls); - - return ret; -} -EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq); - -int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) -{ - int ret; - struct cxl_calls *calls; - - calls = cxl_calls_get(); - if (!calls) - return false; - - ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type); - - cxl_calls_put(calls); - - return ret; -} -EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs); - -void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) -{ - struct cxl_calls *calls; - - calls = cxl_calls_get(); - if (!calls) - return; - - calls->cxl_cx4_teardown_msi_irqs(pdev); - - cxl_calls_put(calls); -} -EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs); - static int __init cxl_base_init(void) { struct device_node *np; diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index c6ec872800a2..5fe529b43ebe 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -74,7 +74,6 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) ctx->pending_afu_err = false; INIT_LIST_HEAD(&ctx->irq_names); - INIT_LIST_HEAD(&ctx->extra_irq_contexts); /* * When we have to destroy all contexts in cxl_context_detach_all() we @@ -96,7 +95,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) */ mutex_lock(&afu->contexts_lock); idr_preload(GFP_KERNEL); - i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe, + i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, ctx->afu->num_procs, GFP_NOWAIT); idr_preload_end(); mutex_unlock(&afu->contexts_lock); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 918d4fb742d1..d1d927ccb589 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -93,11 +93,6 @@ static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148}; static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150}; static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158}; static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170}; -/* XSL registers (Mellanox CX4) */ -static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100}; -static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108}; -static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158}; -static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168}; /* PSL registers - CAIA 2 */ static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020}; static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110}; @@ -613,7 +608,6 @@ struct cxl_context { bool pe_inserted; bool master; bool kernel; - bool real_mode; bool pending_irq; bool pending_fault; bool pending_afu_err; @@ -624,14 +618,6 @@ struct cxl_context { struct rcu_head rcu; - /* - * Only used when more interrupts are allocated via - * pci_enable_msix_range than are supported in the default context, to - * use 
additional contexts to overcome the limitation. i.e. Mellanox - * CX4 only: - */ - struct list_head extra_irq_contexts; - struct mm_struct *mm; u16 tidr; @@ -704,7 +690,6 @@ struct cxl { struct bin_attribute cxl_attr; int adapter_num; int user_irqs; - int min_pe; u64 ps_size; u16 psl_rev; u16 base_image; @@ -865,32 +850,12 @@ static inline bool cxl_is_power9(void) return false; } -static inline bool cxl_is_power9_dd1(void) -{ - if ((pvr_version_is(PVR_POWER9)) && - cpu_has_feature(CPU_FTR_POWER9_DD1)) - return true; - return false; -} - ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, loff_t off, size_t count); -/* Internal functions wrapped in cxl_base to allow PHB to call them */ -bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); -void _cxl_pci_disable_device(struct pci_dev *dev); -int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq); -int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); -void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev); struct cxl_calls { void (*cxl_slbia)(struct mm_struct *mm); - bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu); - void (*cxl_pci_disable_device)(struct pci_dev *dev); - int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq); - int (*cxl_cx4_setup_msi_irqs)(struct pci_dev *pdev, int nvec, int type); - void (*cxl_cx4_teardown_msi_irqs)(struct pci_dev *pdev); - struct module *owner; }; int register_cxl_calls(struct cxl_calls *calls); @@ -955,7 +920,6 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu); void cxl_debugfs_afu_remove(struct cxl_afu *afu); void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir); void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir); -void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir); void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir); void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir); @@ -998,11 +962,6 @@ static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, { } -static inline void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, - struct dentry *dir) -{ -} - static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) { } diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index 0bc7c31cf739..5a3f91255258 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c @@ -102,10 +102,6 @@ int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg) rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl); if (rc) return rc; - if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { - /* workaround for DD1 - nbwind = capiind */ - cfg->dsnctl |= ((u64)0x02 << (63-47)); - } cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION; cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE; diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c index 1643850d2302..a1921d81593a 100644 --- a/drivers/misc/cxl/debugfs.c +++ b/drivers/misc/cxl/debugfs.c @@ -58,11 +58,6 @@ void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir) debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); } -void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir) -{ - debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC)); -} - int cxl_debugfs_adapter_add(struct cxl *adapter) { struct 
dentry *dir; diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 70dbb6de102c..d45f3e6b17d2 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -33,7 +33,7 @@ static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb) * This finds a free SSTE for the given SLB, or returns NULL if it's already in * the segment table. */ -static struct cxl_sste* find_free_sste(struct cxl_context *ctx, +static struct cxl_sste *find_free_sste(struct cxl_context *ctx, struct copro_slb *slb) { struct cxl_sste *primary, *sste, *ret = NULL; diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 4644f16606a3..3bc0c15d4d85 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -623,9 +623,6 @@ static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u { pr_devel("in %s\n", __func__); - if (ctx->real_mode) - return -EPERM; - ctx->kernel = kernel; if (ctx->afu->current_mode == CXL_MODE_DIRECTED) return attach_afu_directed(ctx, wed, amr); @@ -916,11 +913,6 @@ static int afu_properties_look_ok(struct cxl_afu *afu) return -EINVAL; } - if (afu->crs_len < 0) { - dev_err(&afu->dev, "Unexpected configuration record size value\n"); - return -EINVAL; - } - return 0; } diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index c1ba0d42cbc8..f35406be465a 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -104,11 +104,6 @@ static inline void cxl_slbia_core(struct mm_struct *mm) static struct cxl_calls cxl_calls = { .cxl_slbia = cxl_slbia_core, - .cxl_pci_associate_default_context = _cxl_pci_associate_default_context, - .cxl_pci_disable_device = _cxl_pci_disable_device, - .cxl_next_msi_hwirq = _cxl_next_msi_hwirq, - .cxl_cx4_setup_msi_irqs = _cxl_cx4_setup_msi_irqs, - .cxl_cx4_teardown_msi_irqs = _cxl_cx4_teardown_msi_irqs, .owner = THIS_MODULE, }; @@ -287,7 +282,7 @@ int cxl_adapter_context_get(struct cxl *adapter) int rc; rc = atomic_inc_unless_negative(&adapter->contexts_num); - return rc >= 0 ? 0 : -EBUSY; + return rc ? 
0 : -EBUSY; } void cxl_adapter_context_put(struct cxl *adapter) diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 98f867fcef24..c9d5d82dce8e 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -605,6 +605,7 @@ u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9) sr |= CXL_PSL_SR_An_MP; if (mfspr(SPRN_LPCR) & LPCR_TC) sr |= CXL_PSL_SR_An_TC; + if (kernel) { if (!real_mode) sr |= CXL_PSL_SR_An_R; @@ -629,7 +630,7 @@ u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9) static u64 calculate_sr(struct cxl_context *ctx) { - return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode, + return cxl_calculate_sr(ctx->master, ctx->kernel, false, cxl_is_power9()); } diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 429d6de1dde7..b66d832d3233 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -55,8 +55,6 @@ pci_read_config_byte(dev, vsec + 0xa, dest) #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ pci_write_config_byte(dev, vsec + 0xa, val) -#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \ - pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val) #define CXL_VSEC_PROTOCOL_MASK 0xe0 #define CXL_VSEC_PROTOCOL_1024TB 0x80 #define CXL_VSEC_PROTOCOL_512TB 0x40 @@ -465,23 +463,21 @@ int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg) /* nMMU_ID Defaults to: b’000001001’*/ xsl_dsnctl |= ((u64)0x09 << (63-28)); - if (!(cxl_is_power9_dd1())) { - /* - * Used to identify CAPI packets which should be sorted into - * the Non-Blocking queues by the PHB. This field should match - * the PHB PBL_NBW_CMPM register - * nbwind=0x03, bits [57:58], must include capi indicator. - * Not supported on P9 DD1. - */ - xsl_dsnctl |= (nbwind << (63-55)); + /* + * Used to identify CAPI packets which should be sorted into + * the Non-Blocking queues by the PHB. This field should match + * the PHB PBL_NBW_CMPM register + * nbwind=0x03, bits [57:58], must include capi indicator. + * Not supported on P9 DD1. + */ + xsl_dsnctl |= (nbwind << (63-55)); - /* - * Upper 16b address bits of ASB_Notify messages sent to the - * system. Need to match the PHB’s ASN Compare/Mask Register. - * Not supported on P9 DD1. - */ - xsl_dsnctl |= asnind; - } + /* + * Upper 16b address bits of ASB_Notify messages sent to the + * system. Need to match the PHB’s ASN Compare/Mask Register. + * Not supported on P9 DD1. + */ + xsl_dsnctl |= asnind; *reg = xsl_dsnctl; return 0; @@ -539,15 +535,8 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, /* Snoop machines */ cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL); - if (cxl_is_power9_dd1()) { - /* Disabling deadlock counter CAR */ - cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL); - /* Enable NORST */ - cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL); - } else { - /* Enable NORST and DD2 features */ - cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL); - } + /* Enable NORST and DD2 features */ + cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL); /* * Check if PSL has data-cache. 
We need to flush adapter datacache @@ -595,27 +584,7 @@ static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci return 0; } -static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_dev *dev) -{ - u64 xsl_dsnctl; - u64 chipid; - u32 phb_index; - u64 capp_unit_id; - int rc; - - rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); - if (rc) - return rc; - - /* Tell XSL where to route data to */ - xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5)); - xsl_dsnctl |= (capp_unit_id << (63-13)); - cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl); - - return 0; -} - -/* PSL & XSL */ +/* PSL */ #define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3)) #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6)) /* For the PSL this is a multiple for 0 < n <= 7: */ @@ -627,21 +596,6 @@ static void write_timebase_ctrl_psl8(struct cxl *adapter) TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); } -/* XSL */ -#define TBSYNC_ENA (1ULL << 63) -/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */ -#define XSL_2000_CLOCKS 1 -#define XSL_4000_CLOCKS 2 -#define XSL_8000_CLOCKS 3 - -static void write_timebase_ctrl_xsl(struct cxl *adapter) -{ - cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT, - TBSYNC_ENA | - TBSYNC_CAL(3) | - TBSYNC_CNT(XSL_4000_CLOCKS)); -} - static u64 timebase_read_psl9(struct cxl *adapter) { return cxl_p1_read(adapter, CXL_PSL9_Timebase); @@ -652,11 +606,6 @@ static u64 timebase_read_psl8(struct cxl *adapter) return cxl_p1_read(adapter, CXL_PSL_Timebase); } -static u64 timebase_read_xsl(struct cxl *adapter) -{ - return cxl_p1_read(adapter, CXL_XSL_Timebase); -} - static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) { struct device_node *np; @@ -800,234 +749,36 @@ static int setup_cxl_bars(struct pci_dev *dev) return 0; } -#ifdef CONFIG_CXL_BIMODAL - -struct cxl_switch_work { - struct pci_dev *dev; - struct work_struct work; - int vsec; - int mode; -}; - -static void switch_card_to_cxl(struct work_struct *work) +/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ +static int switch_card_to_cxl(struct pci_dev *dev) { - struct cxl_switch_work *switch_work = - container_of(work, struct cxl_switch_work, work); - struct pci_dev *dev = switch_work->dev; - struct pci_bus *bus = dev->bus; - struct pci_controller *hose = pci_bus_to_host(bus); - struct pci_dev *bridge; - struct pnv_php_slot *php_slot; - unsigned int devfn; + int vsec; u8 val; int rc; - dev_info(&bus->dev, "cxl: Preparing for mode switch...\n"); - bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev, - bus_list); - if (!bridge) { - dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n"); - goto err_dev_put; - } - - php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge)); - if (!php_slot) { - dev_err(&bus->dev, "cxl: Failed to find slot hotplug " - "information. You may need to upgrade " - "skiboot. 
Aborting.\n"); - goto err_dev_put; - } - - rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val); - if (rc) { - dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc); - goto err_dev_put; - } - devfn = dev->devfn; - - /* Release the reference obtained in cxl_check_and_switch_mode() */ - pci_dev_put(dev); - - dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n"); - pci_lock_rescan_remove(); - pci_hp_remove_devices(bridge->subordinate); - pci_unlock_rescan_remove(); - - /* Switch the CXL protocol on the card */ - if (switch_work->mode == CXL_BIMODE_CXL) { - dev_info(&bus->dev, "cxl: Switching card to CXL mode\n"); - val &= ~CXL_VSEC_PROTOCOL_MASK; - val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; - rc = pnv_cxl_enable_phb_kernel_api(hose, true); - if (rc) { - dev_err(&bus->dev, "cxl: Failed to enable kernel API" - " on real PHB, aborting\n"); - goto err_free_work; - } - } else { - dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n"); - goto err_free_work; - } - - rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val); - if (rc) { - dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc); - goto err_free_work; - } + dev_info(&dev->dev, "switch card to CXL\n"); - /* - * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states - * we must wait 100ms after this mode switch before touching PCIe config - * space. - */ - msleep(100); - - /* - * Hot reset to cause the card to come back in cxl mode. A - * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support - * in skiboot, so we use a hot reset instead. - * - * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is - * guaranteed to sit directly under the root port, and setting the reset - * state on a device directly under the root port is equivalent to doing - * it on the root port iself. 
- */ - dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n"); - pci_set_pcie_reset_state(bridge, pcie_hot_reset); - pci_set_pcie_reset_state(bridge, pcie_deassert_reset); - - dev_dbg(&bus->dev, "cxl: Offlining slot\n"); - rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE); - if (rc) { - dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc); - goto err_free_work; - } - - dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n"); - rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE); - if (rc) { - dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc); - goto err_free_work; - } - - pci_lock_rescan_remove(); - pci_hp_add_devices(bridge->subordinate); - pci_unlock_rescan_remove(); - - dev_info(&bus->dev, "cxl: CAPI mode switch completed\n"); - kfree(switch_work); - return; - -err_dev_put: - /* Release the reference obtained in cxl_check_and_switch_mode() */ - pci_dev_put(dev); -err_free_work: - kfree(switch_work); -} - -int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec) -{ - struct cxl_switch_work *work; - u8 val; - int rc; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) + if (!(vsec = find_cxl_vsec(dev))) { + dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); return -ENODEV; - - if (!vsec) { - vsec = find_cxl_vsec(dev); - if (!vsec) { - dev_info(&dev->dev, "CXL VSEC not found\n"); - return -ENODEV; - } } - rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); - if (rc) { - dev_err(&dev->dev, "Failed to read current mode control: %i", rc); + if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { + dev_err(&dev->dev, "failed to read current mode control: %i", rc); return rc; } - - if (mode == CXL_BIMODE_PCI) { - if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { - dev_info(&dev->dev, "Card is already in PCI mode\n"); - return 0; - } - /* - * TODO: Before it's safe to switch the card back to PCI mode - * we need to disable the CAPP and make sure any cachelines the - * card holds have been flushed out. Needs skiboot support. - */ - dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n"); - return -EIO; - } - - if (val & CXL_VSEC_PROTOCOL_ENABLE) { - dev_info(&dev->dev, "Card is already in CXL mode\n"); - return 0; + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; + if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { + dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); + return rc; } - - dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread " - "to switch to CXL mode\n"); - - work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL); - if (!work) - return -ENOMEM; - - pci_dev_get(dev); - work->dev = dev; - work->vsec = vsec; - work->mode = mode; - INIT_WORK(&work->work, switch_card_to_cxl); - - schedule_work(&work->work); - /* - * We return a failure now to abort the driver init. Once the - * link has been cycled and the card is in cxl mode we will - * come back (possibly using the generic cxl driver), but - * return success as the card should then be in cxl mode. - * - * TODO: What if the card comes back in PCI mode even after - * the switch? Don't want to spin endlessly. + * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states + * we must wait 100ms after this mode switch before touching + * PCIe config space. 
*/ - return -EBUSY; -} -EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode); - -#endif /* CONFIG_CXL_BIMODAL */ - -static int setup_cxl_protocol_area(struct pci_dev *dev) -{ - u8 val; - int rc; - int vsec = find_cxl_vsec(dev); - - if (!vsec) { - dev_info(&dev->dev, "CXL VSEC not found\n"); - return -ENODEV; - } - - rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); - if (rc) { - dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc); - return rc; - } - - if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { - dev_err(&dev->dev, "Card not in CAPI mode!\n"); - return -EIO; - } - - if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) { - val &= ~CXL_VSEC_PROTOCOL_MASK; - val |= CXL_VSEC_PROTOCOL_256TB; - rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val); - if (rc) { - dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc); - return rc; - } - } + msleep(100); return 0; } @@ -1724,7 +1475,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = setup_cxl_bars(dev))) return rc; - if ((rc = setup_cxl_protocol_area(dev))) + if ((rc = switch_card_to_cxl(dev))) return rc; if ((rc = cxl_update_image_control(adapter))) @@ -1871,37 +1622,14 @@ static const struct cxl_service_layer_ops psl8_ops = { .needs_reset_before_disable = true, }; -static const struct cxl_service_layer_ops xsl_ops = { - .adapter_regs_init = init_implementation_adapter_regs_xsl, - .invalidate_all = cxl_invalidate_all_psl8, - .sanitise_afu_regs = sanitise_afu_regs_psl8, - .handle_interrupt = cxl_irq_psl8, - .fail_irq = cxl_fail_irq_psl, - .activate_dedicated_process = cxl_activate_dedicated_process_psl8, - .attach_afu_directed = cxl_attach_afu_directed_psl8, - .attach_dedicated_process = cxl_attach_dedicated_process_psl8, - .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8, - .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_xsl, - .write_timebase_ctrl = write_timebase_ctrl_xsl, - .timebase_read = timebase_read_xsl, - .capi_mode = OPAL_PHB_CAPI_MODE_DMA, -}; - static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) { - if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { - /* Mellanox CX-4 */ - dev_info(&dev->dev, "Device uses an XSL\n"); - adapter->native->sl_ops = &xsl_ops; - adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */ + if (cxl_is_power8()) { + dev_info(&dev->dev, "Device uses a PSL8\n"); + adapter->native->sl_ops = &psl8_ops; } else { - if (cxl_is_power8()) { - dev_info(&dev->dev, "Device uses a PSL8\n"); - adapter->native->sl_ops = &psl8_ops; - } else { - dev_info(&dev->dev, "Device uses a PSL9\n"); - adapter->native->sl_ops = &psl9_ops; - } + dev_info(&dev->dev, "Device uses a PSL9\n"); + adapter->native->sl_ops = &psl9_ops; } } @@ -2008,43 +1736,6 @@ int cxl_slot_is_switched(struct pci_dev *dev) return (depth > CXL_MAX_PCIEX_PARENT); } -bool cxl_slot_is_supported(struct pci_dev *dev, int flags) -{ - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return false; - - if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) { - /* - * CAPP DMA mode is technically supported on regular P8, but - * will EEH if the card attempts to access memory < 4GB, which - * we cannot realistically avoid. 
We might be able to work - * around the issue, but until then return unsupported: - */ - return false; - } - - if (cxl_slot_is_switched(dev)) - return false; - - /* - * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since - * the CAPP can be connected to PHB 0, 1 or 2 on a first come first - * served basis, which is racy to check from here. If we need to - * support this in future we might need to consider having this - * function effectively reserve it ahead of time. - * - * Currently, the only user of this API is the Mellanox CX4, which is - * only supported on P8NVL due to the above mentioned limitation of - * CAPP DMA mode and therefore does not need to worry about this. If the - * issue with CAPP DMA mode is later worked around on P8 we might need - * to revisit this. - */ - - return true; -} -EXPORT_SYMBOL_GPL(cxl_slot_is_supported); - - static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct cxl *adapter; @@ -2086,9 +1777,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); } - if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1) - pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]); - return 0; } diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c deleted file mode 100644 index 6ec69ada19f4..000000000000 --- a/drivers/misc/cxl/phb.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2014-2016 IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/pci.h> -#include "cxl.h" - -bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) -{ - struct cxl_context *ctx; - - /* - * Allocate a context to do cxl things to. This is used for interrupts - * in the peer model using a real phb, and if we eventually do DMA ops - * in the virtual phb, we'll need a default context to attach them to. - */ - ctx = cxl_dev_context_init(dev); - if (IS_ERR(ctx)) - return false; - dev->dev.archdata.cxl_ctx = ctx; - - return (cxl_ops->afu_check_and_enable(afu) == 0); -} -/* exported via cxl_base */ - -void _cxl_pci_disable_device(struct pci_dev *dev) -{ - struct cxl_context *ctx = cxl_get_context(dev); - - if (ctx) { - if (ctx->status == STARTED) { - dev_err(&dev->dev, "Default context started\n"); - return; - } - dev->dev.archdata.cxl_ctx = NULL; - cxl_release_context(ctx); - } -} -/* exported via cxl_base */ diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 7fd0bdc1436a..7908633d9204 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -9,7 +9,6 @@ #include <linux/pci.h> #include <misc/cxl.h> -#include <asm/pnv-pci.h> #include "cxl.h" static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) @@ -45,6 +44,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) { struct pci_controller *phb; struct cxl_afu *afu; + struct cxl_context *ctx; phb = pci_bus_to_host(dev->bus); afu = (struct cxl_afu *)phb->private_data; @@ -57,7 +57,30 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) set_dma_ops(&dev->dev, &dma_nommu_ops); set_dma_offset(&dev->dev, PAGE_OFFSET); - return _cxl_pci_associate_default_context(dev, afu); + /* + * Allocate a context to do cxl things too. 
If we eventually do real + * DMA ops, we'll need a default context to attach them to + */ + ctx = cxl_dev_context_init(dev); + if (IS_ERR(ctx)) + return false; + dev->dev.archdata.cxl_ctx = ctx; + + return (cxl_ops->afu_check_and_enable(afu) == 0); +} + +static void cxl_pci_disable_device(struct pci_dev *dev) +{ + struct cxl_context *ctx = cxl_get_context(dev); + + if (ctx) { + if (ctx->status == STARTED) { + dev_err(&dev->dev, "Default context started\n"); + return; + } + dev->dev.archdata.cxl_ctx = NULL; + cxl_release_context(ctx); + } } static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus, @@ -191,8 +214,8 @@ static struct pci_controller_ops cxl_pci_controller_ops = { .probe_mode = cxl_pci_probe_mode, .enable_device_hook = cxl_pci_enable_device_hook, - .disable_device = _cxl_pci_disable_device, - .release_device = _cxl_pci_disable_device, + .disable_device = cxl_pci_disable_device, + .release_device = cxl_pci_disable_device, .window_alignment = cxl_pci_window_alignment, .reset_secondary_bus = cxl_pci_reset_secondary_bus, .setup_msi_irqs = cxl_setup_msi_irqs, @@ -284,18 +307,13 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu) */ } -static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) -{ - return (phb->ops == &cxl_pcie_pci_ops); -} - bool cxl_pci_is_vphb_device(struct pci_dev *dev) { struct pci_controller *phb; phb = pci_bus_to_host(dev->bus); - return _cxl_pci_is_vphb_device(phb); + return (phb->ops == &cxl_pcie_pci_ops); } struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) @@ -304,13 +322,7 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) phb = pci_bus_to_host(dev->bus); - if (_cxl_pci_is_vphb_device(phb)) - return (struct cxl_afu *)phb->private_data; - - if (pnv_pci_on_cxl_phb(dev)) - return pnv_cxl_phb_to_afu(phb); - - return ERR_PTR(-ENODEV); + return (struct cxl_afu *)phb->private_data; } EXPORT_SYMBOL_GPL(cxl_pci_to_afu); diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index 95f74623113e..c10a940e3b38 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -86,7 +86,7 @@ out: return rc; } -static int map_afu_irq(struct vm_area_struct *vma, unsigned long address, +static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address, u64 offset, struct ocxl_context *ctx) { u64 trigger_addr; @@ -95,15 +95,15 @@ static int map_afu_irq(struct vm_area_struct *vma, unsigned long address, if (!trigger_addr) return VM_FAULT_SIGBUS; - vm_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT); - return VM_FAULT_NOPAGE; + return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT); } -static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address, +static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address, u64 offset, struct ocxl_context *ctx) { u64 pp_mmio_addr; int pasid_off; + vm_fault_t ret; if (offset >= ctx->afu->config.pp_mmio_stride) return VM_FAULT_SIGBUS; @@ -121,27 +121,27 @@ static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address, pasid_off * ctx->afu->config.pp_mmio_stride + offset; - vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT); + ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT); mutex_unlock(&ctx->status_mutex); - return VM_FAULT_NOPAGE; + return ret; } -static int ocxl_mmap_fault(struct vm_fault *vmf) +static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct ocxl_context *ctx = vma->vm_file->private_data; u64 offset; - int rc; + vm_fault_t ret; offset = vmf->pgoff << 
PAGE_SHIFT; pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__, ctx->pasid, vmf->address, offset); if (offset < ctx->afu->irq_base_offset) - rc = map_pp_mmio(vma, vmf->address, offset, ctx); + ret = map_pp_mmio(vma, vmf->address, offset, ctx); else - rc = map_afu_irq(vma, vmf->address, offset, ctx); - return rc; + ret = map_afu_irq(vma, vmf->address, offset, ctx); + return ret; } static const struct vm_operations_struct ocxl_vmops = { diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 88876ae8f330..a963b0a4a3c5 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -136,7 +136,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work) int rc; /* - * We need to release a reference on the mm whenever exiting this + * We must release a reference on mm_users whenever exiting this * function (taken in the memory fault interrupt handler) */ rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr, @@ -172,7 +172,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work) } r = RESTART; ack: - mmdrop(fault->pe_data.mm); + mmput(fault->pe_data.mm); ack_irq(spa, r); } @@ -184,6 +184,7 @@ static irqreturn_t xsl_fault_handler(int irq, void *data) struct pe_data *pe_data; struct ocxl_process_element *pe; int lpid, pid, tid; + bool schedule = false; read_irq(spa, &dsisr, &dar, &pe_handle); trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1); @@ -226,14 +227,19 @@ static irqreturn_t xsl_fault_handler(int irq, void *data) } WARN_ON(pe_data->mm->context.id != pid); - spa->xsl_fault.pe = pe_handle; - spa->xsl_fault.dar = dar; - spa->xsl_fault.dsisr = dsisr; - spa->xsl_fault.pe_data = *pe_data; - mmgrab(pe_data->mm); /* mm count is released by bottom half */ - + if (mmget_not_zero(pe_data->mm)) { + spa->xsl_fault.pe = pe_handle; + spa->xsl_fault.dar = dar; + spa->xsl_fault.dsisr = dsisr; + spa->xsl_fault.pe_data = *pe_data; + schedule = true; + /* mm_users count released by bottom half */ + } rcu_read_unlock(); - schedule_work(&spa->xsl_fault.fault_work); + if (schedule) + schedule_work(&spa->xsl_fault.fault_work); + else + ack_irq(spa, ADDRESS_ERROR); return IRQ_HANDLED; } diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c index d9753a1db14b..0ab1fd1b2682 100644 --- a/drivers/misc/ocxl/sysfs.c +++ b/drivers/misc/ocxl/sysfs.c @@ -64,7 +64,7 @@ static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj, return count; } -static int global_mmio_fault(struct vm_fault *vmf) +static vm_fault_t global_mmio_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct ocxl_afu *afu = vma->vm_private_data; @@ -75,8 +75,7 @@ static int global_mmio_fault(struct vm_fault *vmf) offset = vmf->pgoff; offset += (afu->global_mmio_start >> PAGE_SHIFT); - vm_insert_pfn(vma, vmf->address, offset); - return VM_FAULT_NOPAGE; + return vmf_insert_pfn(vma, vmf->address, offset); } static const struct vm_operations_struct global_mmio_vmops = { diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 7709fcc707f4..5414c4a87bea 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -73,7 +73,7 @@ static LIST_HEAD(hvc_structs); * Protect the list of hvc_struct instances from inserts and removals during * list traversal. 
*/ -static DEFINE_SPINLOCK(hvc_structs_lock); +static DEFINE_MUTEX(hvc_structs_mutex); /* * This value is used to assign a tty->index value to a hvc_struct based @@ -83,7 +83,7 @@ static DEFINE_SPINLOCK(hvc_structs_lock); static int last_hvc = -1; /* - * Do not call this function with either the hvc_structs_lock or the hvc_struct + * Do not call this function with either the hvc_structs_mutex or the hvc_struct * lock held. If successful, this function increments the kref reference * count against the target hvc_struct so it should be released when finished. */ @@ -92,24 +92,46 @@ static struct hvc_struct *hvc_get_by_index(int index) struct hvc_struct *hp; unsigned long flags; - spin_lock(&hvc_structs_lock); + mutex_lock(&hvc_structs_mutex); list_for_each_entry(hp, &hvc_structs, next) { spin_lock_irqsave(&hp->lock, flags); if (hp->index == index) { tty_port_get(&hp->port); spin_unlock_irqrestore(&hp->lock, flags); - spin_unlock(&hvc_structs_lock); + mutex_unlock(&hvc_structs_mutex); return hp; } spin_unlock_irqrestore(&hp->lock, flags); } hp = NULL; + mutex_unlock(&hvc_structs_mutex); - spin_unlock(&hvc_structs_lock); return hp; } +static int __hvc_flush(const struct hv_ops *ops, uint32_t vtermno, bool wait) +{ + if (wait) + might_sleep(); + + if (ops->flush) + return ops->flush(vtermno, wait); + return 0; +} + +static int hvc_console_flush(const struct hv_ops *ops, uint32_t vtermno) +{ + return __hvc_flush(ops, vtermno, false); +} + +/* + * Wait for the console to flush before writing more to it. This sleeps. + */ +static int hvc_flush(struct hvc_struct *hp) +{ + return __hvc_flush(hp->ops, hp->vtermno, true); +} /* * Initial console vtermnos for console API usage prior to full console @@ -156,8 +178,12 @@ static void hvc_console_print(struct console *co, const char *b, if (r <= 0) { /* throw away characters on error * but spin in case of -EAGAIN */ - if (r != -EAGAIN) + if (r != -EAGAIN) { i = 0; + } else { + hvc_console_flush(cons_ops[index], + vtermnos[index]); + } } else if (r > 0) { i -= r; if (i > 0) @@ -165,6 +191,7 @@ static void hvc_console_print(struct console *co, const char *b, } } } + hvc_console_flush(cons_ops[index], vtermnos[index]); } static struct tty_driver *hvc_console_device(struct console *c, int *index) @@ -224,13 +251,13 @@ static void hvc_port_destruct(struct tty_port *port) struct hvc_struct *hp = container_of(port, struct hvc_struct, port); unsigned long flags; - spin_lock(&hvc_structs_lock); + mutex_lock(&hvc_structs_mutex); spin_lock_irqsave(&hp->lock, flags); list_del(&(hp->next)); spin_unlock_irqrestore(&hp->lock, flags); - spin_unlock(&hvc_structs_lock); + mutex_unlock(&hvc_structs_mutex); kfree(hp); } @@ -494,23 +521,32 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count if (hp->port.count <= 0) return -EIO; - spin_lock_irqsave(&hp->lock, flags); + while (count > 0) { + spin_lock_irqsave(&hp->lock, flags); - /* Push pending writes */ - if (hp->n_outbuf > 0) - hvc_push(hp); - - while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) { - if (rsize > count) - rsize = count; - memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); - count -= rsize; - buf += rsize; - hp->n_outbuf += rsize; - written += rsize; - hvc_push(hp); + rsize = hp->outbuf_size - hp->n_outbuf; + + if (rsize) { + if (rsize > count) + rsize = count; + memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); + count -= rsize; + buf += rsize; + hp->n_outbuf += rsize; + written += rsize; + } + + if (hp->n_outbuf > 0) + hvc_push(hp); + + spin_unlock_irqrestore(&hp->lock, 
flags); + + if (count) { + if (hp->n_outbuf > 0) + hvc_flush(hp); + cond_resched(); + } } - spin_unlock_irqrestore(&hp->lock, flags); /* * Racy, but harmless, kick thread if there is still pending data. @@ -590,10 +626,10 @@ static u32 timeout = MIN_TIMEOUT; #define HVC_POLL_READ 0x00000001 #define HVC_POLL_WRITE 0x00000002 -int hvc_poll(struct hvc_struct *hp) +static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) { struct tty_struct *tty; - int i, n, poll_mask = 0; + int i, n, count, poll_mask = 0; char buf[N_INBUF] __ALIGNED__; unsigned long flags; int read_total = 0; @@ -612,6 +648,12 @@ int hvc_poll(struct hvc_struct *hp) timeout = (written_total) ? 0 : MIN_TIMEOUT; } + if (may_sleep) { + spin_unlock_irqrestore(&hp->lock, flags); + cond_resched(); + spin_lock_irqsave(&hp->lock, flags); + } + /* No tty attached, just skip */ tty = tty_port_tty_get(&hp->port); if (tty == NULL) @@ -619,7 +661,7 @@ int hvc_poll(struct hvc_struct *hp) /* Now check if we can get data (are we throttled ?) */ if (tty_throttled(tty)) - goto throttled; + goto out; /* If we aren't notifier driven and aren't throttled, we always * request a reschedule @@ -628,56 +670,58 @@ int hvc_poll(struct hvc_struct *hp) poll_mask |= HVC_POLL_READ; /* Read data if any */ - for (;;) { - int count = tty_buffer_request_room(&hp->port, N_INBUF); - /* If flip is full, just reschedule a later read */ - if (count == 0) { + count = tty_buffer_request_room(&hp->port, N_INBUF); + + /* If flip is full, just reschedule a later read */ + if (count == 0) { + poll_mask |= HVC_POLL_READ; + goto out; + } + + n = hp->ops->get_chars(hp->vtermno, buf, count); + if (n <= 0) { + /* Hangup the tty when disconnected from host */ + if (n == -EPIPE) { + spin_unlock_irqrestore(&hp->lock, flags); + tty_hangup(tty); + spin_lock_irqsave(&hp->lock, flags); + } else if ( n == -EAGAIN ) { + /* + * Some back-ends can only ensure a certain min + * num of bytes read, which may be > 'count'. + * Let the tty clear the flip buff to make room. + */ poll_mask |= HVC_POLL_READ; - break; } + goto out; + } - n = hp->ops->get_chars(hp->vtermno, buf, count); - if (n <= 0) { - /* Hangup the tty when disconnected from host */ - if (n == -EPIPE) { - spin_unlock_irqrestore(&hp->lock, flags); - tty_hangup(tty); - spin_lock_irqsave(&hp->lock, flags); - } else if ( n == -EAGAIN ) { - /* - * Some back-ends can only ensure a certain min - * num of bytes read, which may be > 'count'. - * Let the tty clear the flip buff to make room. 
@@ -590,10 +626,10 @@ static u32 timeout = MIN_TIMEOUT;
 #define HVC_POLL_READ	0x00000001
 #define HVC_POLL_WRITE	0x00000002
 
-int hvc_poll(struct hvc_struct *hp)
+static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
 {
 	struct tty_struct *tty;
-	int i, n, poll_mask = 0;
+	int i, n, count, poll_mask = 0;
 	char buf[N_INBUF] __ALIGNED__;
 	unsigned long flags;
 	int read_total = 0;
@@ -612,6 +648,12 @@ int hvc_poll(struct hvc_struct *hp)
 		timeout = (written_total) ? 0 : MIN_TIMEOUT;
 	}
 
+	if (may_sleep) {
+		spin_unlock_irqrestore(&hp->lock, flags);
+		cond_resched();
+		spin_lock_irqsave(&hp->lock, flags);
+	}
+
 	/* No tty attached, just skip */
 	tty = tty_port_tty_get(&hp->port);
 	if (tty == NULL)
@@ -619,7 +661,7 @@ int hvc_poll(struct hvc_struct *hp)
 
 	/* Now check if we can get data (are we throttled ?) */
 	if (tty_throttled(tty))
-		goto throttled;
+		goto out;
 
 	/* If we aren't notifier driven and aren't throttled, we always
 	 * request a reschedule
@@ -628,56 +670,58 @@ int hvc_poll(struct hvc_struct *hp)
 		poll_mask |= HVC_POLL_READ;
 
 	/* Read data if any */
-	for (;;) {
-		int count = tty_buffer_request_room(&hp->port, N_INBUF);
 
-		/* If flip is full, just reschedule a later read */
-		if (count == 0) {
+	count = tty_buffer_request_room(&hp->port, N_INBUF);
+
+	/* If flip is full, just reschedule a later read */
+	if (count == 0) {
+		poll_mask |= HVC_POLL_READ;
+		goto out;
+	}
+
+	n = hp->ops->get_chars(hp->vtermno, buf, count);
+	if (n <= 0) {
+		/* Hangup the tty when disconnected from host */
+		if (n == -EPIPE) {
+			spin_unlock_irqrestore(&hp->lock, flags);
+			tty_hangup(tty);
+			spin_lock_irqsave(&hp->lock, flags);
+		} else if ( n == -EAGAIN ) {
+			/*
+			 * Some back-ends can only ensure a certain min
+			 * num of bytes read, which may be > 'count'.
+			 * Let the tty clear the flip buff to make room.
+			 */
 			poll_mask |= HVC_POLL_READ;
-			break;
 		}
+		goto out;
+	}
 
-		n = hp->ops->get_chars(hp->vtermno, buf, count);
-		if (n <= 0) {
-			/* Hangup the tty when disconnected from host */
-			if (n == -EPIPE) {
-				spin_unlock_irqrestore(&hp->lock, flags);
-				tty_hangup(tty);
-				spin_lock_irqsave(&hp->lock, flags);
-			} else if ( n == -EAGAIN ) {
-				/*
-				 * Some back-ends can only ensure a certain min
-				 * num of bytes read, which may be > 'count'.
-				 * Let the tty clear the flip buff to make room.
-				 */
-				poll_mask |= HVC_POLL_READ;
-			}
-			break;
-		}
-		for (i = 0; i < n; ++i) {
+	for (i = 0; i < n; ++i) {
 #ifdef CONFIG_MAGIC_SYSRQ
-			if (hp->index == hvc_console.index) {
-				/* Handle the SysRq Hack */
-				/* XXX should support a sequence */
-				if (buf[i] == '\x0f') {	/* ^O */
-					/* if ^O is pressed again, reset
-					 * sysrq_pressed and flip ^O char */
-					sysrq_pressed = !sysrq_pressed;
-					if (sysrq_pressed)
-						continue;
-				} else if (sysrq_pressed) {
-					handle_sysrq(buf[i]);
-					sysrq_pressed = 0;
+		if (hp->index == hvc_console.index) {
+			/* Handle the SysRq Hack */
+			/* XXX should support a sequence */
+			if (buf[i] == '\x0f') {	/* ^O */
+				/* if ^O is pressed again, reset
+				 * sysrq_pressed and flip ^O char */
+				sysrq_pressed = !sysrq_pressed;
+				if (sysrq_pressed)
 					continue;
-				}
+			} else if (sysrq_pressed) {
+				handle_sysrq(buf[i]);
+				sysrq_pressed = 0;
+				continue;
 			}
-#endif /* CONFIG_MAGIC_SYSRQ */
-			tty_insert_flip_char(&hp->port, buf[i], 0);
 		}
-
-		read_total += n;
+#endif /* CONFIG_MAGIC_SYSRQ */
+		tty_insert_flip_char(&hp->port, buf[i], 0);
 	}
- throttled:
+	if (n == count)
+		poll_mask |= HVC_POLL_READ;
+	read_total = n;
+
+ out:
 	/* Wakeup write queue if necessary */
 	if (hp->do_wakeup) {
 		hp->do_wakeup = 0;
@@ -697,6 +741,11 @@ int hvc_poll(struct hvc_struct *hp)
 
 	return poll_mask;
 }
+
+int hvc_poll(struct hvc_struct *hp)
+{
+	return __hvc_poll(hp, false);
+}
 EXPORT_SYMBOL_GPL(hvc_poll);
 
 /**
@@ -733,11 +782,12 @@ static int khvcd(void *unused)
 		try_to_freeze();
 		wmb();
 		if (!cpus_are_in_xmon()) {
-			spin_lock(&hvc_structs_lock);
+			mutex_lock(&hvc_structs_mutex);
 			list_for_each_entry(hp, &hvc_structs, next) {
-				poll_mask |= hvc_poll(hp);
+				poll_mask |= __hvc_poll(hp, true);
+				cond_resched();
 			}
-			spin_unlock(&hvc_structs_lock);
+			mutex_unlock(&hvc_structs_mutex);
 		} else
 			poll_mask |= HVC_POLL_READ;
 		if (hvc_kicked)
@@ -871,7 +921,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
 	INIT_WORK(&hp->tty_resize, hvc_set_winsz);
 	spin_lock_init(&hp->lock);
 
-	spin_lock(&hvc_structs_lock);
+	mutex_lock(&hvc_structs_mutex);
 
 	/*
 	 * find index to use:
@@ -891,7 +941,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
 		vtermnos[i] = vtermno;
 
 	list_add_tail(&(hp->next), &hvc_structs);
-	spin_unlock(&hvc_structs_lock);
+	mutex_unlock(&hvc_structs_mutex);
 
 	/* check if we need to re-register the kernel console */
 	hvc_check_console(i);
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index ea63090e013f..e9319954c832 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -54,6 +54,7 @@ struct hvc_struct {
 struct hv_ops {
 	int (*get_chars)(uint32_t vtermno, char *buf, int count);
 	int (*put_chars)(uint32_t vtermno, const char *buf, int count);
+	int (*flush)(uint32_t vtermno, bool wait);
 
 	/* Callbacks for notification. Called in open, close and hangup */
 	int (*notifier_add)(struct hvc_struct *hp, int irq);
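With the struct hv_ops change above, a backend advertises the callback in its ops table; .flush may stay NULL, in which case __hvc_flush() falls back to returning 0. A sketch of the wiring, reusing the hypothetical example_flush() from earlier (all example_* names are ours, not the kernel's):

	static int example_get_chars(uint32_t vtermno, char *buf, int count);
	static int example_put_chars(uint32_t vtermno, const char *buf, int count);
	static int example_flush(uint32_t vtermno, bool wait);

	static const struct hv_ops example_ops = {
		.get_chars	= example_get_chars,
		.put_chars	= example_put_chars,
		.flush		= example_flush,	/* optional; NULL means no-op */
	};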
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 9645c0062a90..f631f8bee308 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -183,9 +183,15 @@ static int hvc_opal_probe(struct platform_device *dev)
 		return -ENOMEM;
 	pv->proto = proto;
 	hvc_opal_privs[termno] = pv;
-	if (proto == HV_PROTOCOL_HVSI)
-		hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
+	if (proto == HV_PROTOCOL_HVSI) {
+		/*
+		 * We want put_chars to be atomic to avoid mangling of
+		 * hvsi packets.
+		 */
+		hvsilib_init(&pv->hvsi,
+			     opal_get_chars, opal_put_chars_atomic,
 			     termno, 0);
+	}
 
 	/* Instantiate now to establish a mapping index==vtermno */
 	hvc_instantiate(termno, termno, ops);
@@ -275,6 +281,11 @@ static void udbg_opal_putc(char c)
 			count = hvc_opal_hvsi_put_chars(termno, &c, 1);
 			break;
 		}
+
+		/* This is needed for the console to flush
+		 * when there aren't any interrupts.
+		 */
+		opal_flush_console(termno);
 	} while(count == 0 || count == -EAGAIN);
 }
@@ -302,14 +313,8 @@ static int udbg_opal_getc(void)
 	int ch;
 
 	for (;;) {
 		ch = udbg_opal_getc_poll();
-		if (ch == -1) {
-			/* This shouldn't be needed...but... */
-			volatile unsigned long delay;
-			for (delay=0; delay < 2000000; delay++)
-				;
-		} else {
+		if (ch != -1)
 			return ch;
-		}
 	}
 }
@@ -370,8 +375,9 @@ void __init hvc_opal_init_early(void)
 	else if (of_device_is_compatible(stdout_node,"ibm,opal-console-hvsi")) {
 		hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
 		ops = &hvc_opal_hvsi_ops;
-		hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars,
-			     opal_put_chars, index, 1);
+		hvsilib_init(&hvc_opal_boot_priv.hvsi,
+			     opal_get_chars, opal_put_chars_atomic,
+			     index, 1);
 		/* HVSI, perform the handshake now */
 		hvsilib_establish(&hvc_opal_boot_priv.hvsi);
 		pr_devel("hvc_opal: Found HVSI console\n");
@@ -403,7 +409,8 @@ void __init udbg_init_debug_opal_hvsi(void)
 	hvc_opal_privs[index] = &hvc_opal_boot_priv;
 	hvc_opal_boot_termno = index;
 	udbg_init_opal_common();
-	hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars,
+	hvsilib_init(&hvc_opal_boot_priv.hvsi,
+		     opal_get_chars, opal_put_chars_atomic,
 		     index, 1);
 	hvsilib_establish(&hvc_opal_boot_priv.hvsi);
 }
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 8c733492d8fe..454d8c624a3f 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -86,7 +86,7 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
 	int result;
 	struct usb_hcd *hcd;
 	unsigned int virq;
-	static u64 dummy_mask = DMA_BIT_MASK(32);
+	static u64 dummy_mask;
 
 	if (usb_disabled()) {
 		result = -ENODEV;
@@ -131,7 +131,9 @@ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
 		goto fail_irq;
 	}
 
-	dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
+	dummy_mask = DMA_BIT_MASK(32);
+	dev->core.dma_mask = &dummy_mask;
+	dma_set_coherent_mask(&dev->core, dummy_mask);
 
 	hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev_name(&dev->core));
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 20a23d795adf..395f9d3bc849 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -69,7 +69,7 @@ static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
 	int result;
 	struct usb_hcd *hcd;
 	unsigned int virq;
-	static u64 dummy_mask = DMA_BIT_MASK(32);
+	static u64 dummy_mask;
 
 	if (usb_disabled()) {
 		result = -ENODEV;
@@ -115,7 +115,9 @@ static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
 		goto fail_irq;
 	}
 
-	dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
+	dummy_mask = DMA_BIT_MASK(32);
+	dev->core.dma_mask = &dummy_mask;
+	dma_set_coherent_mask(&dev->core, dummy_mask);
 
 	hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev_name(&dev->core));
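The PS3 EHCI and OHCI probes previously set only dev->core.dma_mask, leaving the coherent DMA mask unset; the fix initializes the mask at probe time and mirrors it into the coherent mask, which allocations through dma_alloc_coherent() consult. The general pattern, sketched for a hypothetical probe function (example_* names are ours):

	#include <linux/dma-mapping.h>

	static u64 example_dma_mask;

	/* Sketch: give the device both a streaming and a coherent DMA mask
	 * before any dma_map_*()/dma_alloc_coherent() calls are made.
	 */
	static int example_probe(struct device *dev)
	{
		example_dma_mask = DMA_BIT_MASK(32);
		dev->dma_mask = &example_dma_mask;
		return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}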
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 7cd63b0c1a46..96721b154454 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -211,44 +211,6 @@ static long tce_iommu_register_pages(struct tce_container *container,
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
-		struct mm_struct *mm)
-{
-	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
-			tbl->it_size, PAGE_SIZE);
-	unsigned long *uas;
-	long ret;
-
-	BUG_ON(tbl->it_userspace);
-
-	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
-	if (ret)
-		return ret;
-
-	uas = vzalloc(cb);
-	if (!uas) {
-		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
-		return -ENOMEM;
-	}
-	tbl->it_userspace = uas;
-
-	return 0;
-}
-
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
-		struct mm_struct *mm)
-{
-	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
-			tbl->it_size, PAGE_SIZE);
-
-	if (!tbl->it_userspace)
-		return;
-
-	vfree(tbl->it_userspace);
-	tbl->it_userspace = NULL;
-	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
-}
-
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
 {
 	/*
@@ -482,20 +444,20 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
-	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
 	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
-			&hpa, &mem);
+	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
+			tbl->it_page_shift, &hpa, &mem);
 	if (ret)
-		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
-				__func__, *pua, entry, ret);
+		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
+				__func__, be64_to_cpu(*pua), entry, ret);
 	if (mem)
 		mm_iommu_mapped_dec(mem);
 
-	*pua = 0;
+	*pua = cpu_to_be64(0);
 }
 
 static int tce_iommu_clear(struct tce_container *container,
@@ -599,16 +561,9 @@ static long tce_iommu_build_v2(struct tce_container *container,
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
-	if (!tbl->it_userspace) {
-		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
-		if (ret)
-			return ret;
-	}
-
 	for (i = 0; i < pages; ++i) {
 		struct mm_iommu_table_group_mem_t *mem = NULL;
-		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
-				entry + i);
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
 
 		ret = tce_iommu_prereg_ua_to_hpa(container, tce,
 				tbl->it_page_shift, &hpa, &mem);
@@ -642,7 +597,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		if (dirtmp != DMA_NONE)
 			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 
-		*pua = tce;
+		*pua = cpu_to_be64(tce);
 
 		tce += IOMMU_PAGE_SIZE(tbl);
 	}
@@ -676,7 +631,7 @@ static long tce_iommu_create_table(struct tce_container *container,
 			page_shift, window_size, levels, ptbl);
 
 	WARN_ON(!ret && !(*ptbl)->it_ops->free);
-	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
+	WARN_ON(!ret && ((*ptbl)->it_allocated_size > table_size));
 
 	return ret;
 }
@@ -686,7 +641,6 @@ static void tce_iommu_free_table(struct tce_container *container,
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl, container->mm);
 	iommu_tce_table_put(tbl);
 	decrement_locked_vm(container->mm, pages);
 }
@@ -1201,7 +1155,6 @@ static void tce_iommu_release_ownership(struct tce_container *container,
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
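The vfio change stores the it_userspace table entries as explicit big-endian (__be64) values, so every read and write now goes through the byte-order helpers, and the __be64 annotation also lets sparse flag raw accesses. A minimal standalone illustration of the accessor pattern (the example_* names are ours, not the kernel's):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Read a big-endian table entry into native byte order. */
	static inline unsigned long example_get_ua(const __be64 *pua)
	{
		return be64_to_cpu(*pua);
	}

	/* Store a native value as big-endian, regardless of host endianness. */
	static inline void example_set_ua(__be64 *pua, unsigned long ua)
	{
		*pua = cpu_to_be64(ua);
	}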