path: root/drivers/gpu/drm/xe/xe_gpu_scheduler.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

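/* Queue the message-processing work item unless submission is paused. */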
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

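/* Re-queue the message worker if messages are still pending on the list. */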
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	xe_sched_msg_unlock(sched);
}

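/* Remove and return the first pending message, or NULL if the list is empty. */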
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del_init(&msg->link);
	xe_sched_msg_unlock(sched);

	return msg;
}

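/* Worker: process one pending message, then re-queue itself if more remain. */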
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

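/* Initialize the message list and worker, then the underlying DRM scheduler. */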
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
			      hang_limit, timeout, timeout_wq, score, name,
			      dev);
}

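/* Stop submission and tear down the underlying DRM scheduler. */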
void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

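/* Restart the DRM scheduler workqueue and kick message processing. */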
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

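/* Stop the DRM scheduler workqueue and cancel outstanding message work. */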
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

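/* Resume timeout (TDR) handling using the scheduler's configured timeout. */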
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
	drm_sched_resume_timeout(&sched->base, sched->base.timeout);
}

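/* Add a message to the scheduler, taking the message lock internally. */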
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}

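/* Add a message with the message lock already held; kicks the worker. */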
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->base.job_list_lock);

	list_add_tail(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}