Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 181
1 files changed, 147 insertions, 34 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index d33cb344be69..193ffdb957b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -223,12 +223,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 	*pos &= (1UL << 22) - 1;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -332,12 +336,16 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -387,12 +395,16 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -443,12 +455,16 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -498,12 +514,16 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -554,12 +574,16 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -609,12 +633,16 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -764,12 +792,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	valuesize = sizeof(values);
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
 
@@ -842,12 +874,16 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* switch to the specific se/sh/cu */
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -937,11 +973,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
 	if (r < 0)
-		return r;
+		goto err;
 
 	r = amdgpu_virt_enable_access_debugfs(adev);
 	if (r < 0)
-		return r;
+		goto err;
 
 	/* switch to the specific se/sh/cu */
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -967,7 +1003,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 		value = data[result >> 2];
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
-			result = r;
+			amdgpu_virt_disable_access_debugfs(adev);
 			goto err;
 		}
 
@@ -976,10 +1012,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
-err:
 	kfree(data);
 	amdgpu_virt_disable_access_debugfs(adev);
 	return result;
+
+err:
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+	kfree(data);
+	return r;
 }
 
 /**
@@ -1003,8 +1043,10 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	while (size) {
 		uint32_t value;
@@ -1031,6 +1073,57 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 }
 
 
+/**
+ * amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
+					  size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = file_inode(f)->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
+	while (size) {
+		uint32_t value;
+
+		r = amdgpu_get_gfx_off_status(adev, &value);
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
+			return r;
+		}
+
+		r = put_user(value, (uint32_t *)buf);
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
+			return r;
+		}
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -1081,7 +1174,9 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
 
 static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
 	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_gfxoff_read,
 	.write = amdgpu_debugfs_gfxoff_write,
+	.llseek = default_llseek
 };
 
 static const struct file_operations *debugfs_regs[] = {
@@ -1140,8 +1235,10 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 	int r = 0, i;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* Avoid accidently unparking the sched thread during GPU reset */
 	mutex_lock(&adev->lock_reset);
@@ -1197,8 +1294,10 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
 	int r;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
 
@@ -1216,8 +1315,10 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
 	int r;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
 
@@ -1295,27 +1396,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 {
 	struct amdgpu_job *job;
-	struct drm_sched_job *s_job;
+	struct drm_sched_job *s_job, *tmp;
 	uint32_t preempt_seq;
 	struct dma_fence *fence, **ptr;
 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
 	struct drm_gpu_scheduler *sched = &ring->sched;
+	bool preempted = true;
 
 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
 		return;
 
 	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
-	if (preempt_seq <= atomic_read(&drv->last_seq))
-		return;
+	if (preempt_seq <= atomic_read(&drv->last_seq)) {
+		preempted = false;
+		goto no_preempt;
+	}
 
 	preempt_seq &= drv->num_fences_mask;
 	ptr = &drv->fences[preempt_seq];
 	fence = rcu_dereference_protected(*ptr, 1);
 
+no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
+			/* remove job from ring_mirror_list */
+			list_del_init(&s_job->node);
+			sched->ops->free_job(s_job);
+			continue;
+		}
 		job = to_amdgpu_job(s_job);
-		if (job->fence == fence)
+		if (preempted && job->fence == fence)
 			/* mark the job as preempted */
 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
 	}
@@ -1407,14 +1518,16 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
-		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
 		if (ret || val > max_freq || val < min_freq)
 			return -EINVAL;
-		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
 	} else {
 		return 0;
 	}
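Note on the recurring pattern: pm_runtime_get_sync() increments the device's runtime-PM usage counter even when the resume fails, so bailing out with a bare "return r;" leaks the reference and can keep the GPU from ever autosuspending; that is what the added pm_runtime_put_autosuspend() calls in the error paths above restore. The following is a minimal sketch of the balanced shape these debugfs handlers end up with; the handler name and the way the device pointer is fetched are illustrative, only the pm_runtime_* and debugfs/VFS helpers are real kernel APIs.

/* Sketch of the get/put balance applied throughout the patch above;
 * example_debugfs_read and the i_private usage are illustrative.
 */
#include <linux/fs.h>
#include <linux/pm_runtime.h>

static ssize_t example_debugfs_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct device *dev = file_inode(f)->i_private;
	u32 value = 0;
	int r;

	r = pm_runtime_get_sync(dev);
	if (r < 0) {
		/* get_sync took a usage-count reference even though the
		 * resume failed, so the error path must drop it again.
		 */
		pm_runtime_put_autosuspend(dev);
		return r;
	}

	/* ... talk to the hardware and fill 'value' here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return simple_read_from_buffer(buf, size, pos, &value, sizeof(value));
}

Later kernels also provide pm_runtime_resume_and_get(), which drops the reference itself when the resume fails, but at the time of this change the explicit put on the error path was the idiomatic fix.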