// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <linux/math64.h>

#include "xe_gt_clock.h"

#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_macros.h"
#include "xe_mmio.h"

static u32 read_reference_ts_freq(struct xe_gt *gt)
{
	u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
				  ts_override) + 1;
	base_freq *= 1000000;

	frac_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK,
				  ts_override);
	frac_freq = 1000000 / (frac_freq + 1);

	return base_freq + frac_freq;
}
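
/* Map the RPM_CONFIG0 crystal clock frequency field to a frequency in Hz. */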
static u32 get_crystal_clock_freq(u32 rpm_config_reg)
{
	const u32 f19_2_mhz = 19200000;
	const u32 f24_mhz = 24000000;
	const u32 f25_mhz = 25000000;
	const u32 f38_4_mhz = 38400000;
	u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK,
					  rpm_config_reg);

	switch (crystal_clock) {
	case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		XE_WARN_ON("NOT_POSSIBLE");
		return 0;
	}
}
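
/*
 * Determine the GT reference clock: either the divide-logic frequency from
 * TIMESTAMP_OVERRIDE or the crystal clock from RPM_CONFIG0 scaled down by
 * the CTC shift parameter, and cache it in gt->info.reference_clock.
 */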
int xe_gt_clock_init(struct xe_gt *gt)
{
	u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE);
	u32 freq = 0;

	/* Assuming gen11+ so assert this assumption is correct */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
		freq = read_reference_ts_freq(gt);
	} else {
		u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0);

		freq = get_crystal_clock_freq(c0);

		/*
		 * Now figure out how the command stream's timestamp
		 * register increments from this frequency (it might
		 * increment only every few clock cycles).
		 */
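		/* e.g. shift parameter 3 keeps the full rate; 0 divides it by 8 */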
		freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
	}

	gt->info.reference_clock = freq;
	return 0;
}
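
/* Unsigned 64-bit division that rounds the quotient up instead of truncating. */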
static u64 div_u64_roundup(u64 n, u32 d)
{
	return div_u64(n + d - 1, d);
}

/**
 * xe_gt_clock_interval_to_ms - Convert sampled GT clock ticks to msec
 *
 * @gt: the &xe_gt
 * @count: count of GT clock ticks
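 *
 * The division rounds up, so any non-zero @count yields at least 1 msec.
 * For example, with a 19.2 MHz reference clock a @count of 19,200,000 ticks
 * converts to 1000 msec.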
 *
 * Returns: time in msec
 */
u64 xe_gt_clock_interval_to_ms(struct xe_gt *gt, u64 count)
{
	return div_u64_roundup(count * MSEC_PER_SEC, gt->info.reference_clock);
}