path: root/kernel/kcsan
author     Ingo Molnar <mingo@kernel.org>    2019-11-20 10:41:43 +0100
committer  Ingo Molnar <mingo@kernel.org>    2019-11-20 10:47:23 +0100
commit     5cbaefe9743bf14c9d3106db0cc19f8cb0a3ca22 (patch)
tree       b89923344fb8eab289073d904d64e29f51723e88 /kernel/kcsan
parent     Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/pau... (diff)
kcsan: Improve various small stylistic details
Tidy up a few bits:

 - Fix typos and grammar, improve wording.
 - Remove spurious newlines that are col80 warning artifacts where the
   resulting line-break is worse than the disease it's curing.
 - Use core kernel coding style to improve readability and reduce
   spurious code pattern variations.
 - Use better vertical alignment for structure definitions and
   initialization sequences.
 - Misc other small details.

No change in functionality intended.

Cc: linux-kernel@vger.kernel.org
Cc: Marco Elver <elver@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
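As a quick illustration of the vertical-alignment convention the message refers to, here is a hypothetical, compilable userspace struct and initializer written in that style; the member names echo the kcsan_ctx fields touched below, but the type itself is made up and is not part of this patch:

#include <stdbool.h>

/* Hypothetical type, not from the patch: member names and '=' signs are tab-aligned. */
struct ex_ctx {
	int	disable_count;
	int	atomic_next;
	bool	in_flat_atomic;
};

static struct ex_ctx ex_cpu_ctx = {
	.disable_count	= 0,
	.atomic_next	= 0,
	.in_flat_atomic	= false,
};

int main(void) { return ex_cpu_ctx.disable_count; }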
Diffstat (limited to 'kernel/kcsan')
-rw-r--r--  kernel/kcsan/atomic.h     2
-rw-r--r--  kernel/kcsan/core.c      59
-rw-r--r--  kernel/kcsan/debugfs.c   62
-rw-r--r--  kernel/kcsan/encoding.h  25
-rw-r--r--  kernel/kcsan/kcsan.h     11
-rw-r--r--  kernel/kcsan/report.c    42
-rw-r--r--  kernel/kcsan/test.c       6
7 files changed, 99 insertions, 108 deletions
diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index c9c3fe628011..576e03ddd6a3 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -6,7 +6,7 @@
#include <linux/jiffies.h>
/*
- * Helper that returns true if access to ptr should be considered as an atomic
+ * Helper that returns true if access to @ptr should be considered an atomic
* access, even though it is not explicitly atomic.
*
* List all volatile globals that have been observed in races, to suppress
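The atomic.h hunk is cut short here and the helper's body is not shown. A rough userspace sketch of the mechanism the comment describes (a fixed list of volatile globals whose accesses are treated as atomic, so races on them are not reported) might look like the following; the jiffies include above suggests jiffies is on that list, but that is an assumption:

#include <stdbool.h>

/* Stand-in for a volatile global that has been observed in races. */
static volatile unsigned long ex_jiffies;

/*
 * Hypothetical helper in the spirit of the comment above: treat accesses
 * to listed volatile globals as atomic, suppressing data race reports
 * for them.
 */
static bool ex_is_atomic(const volatile void *ptr)
{
	return ptr == &ex_jiffies;
}

int main(void)
{
	return ex_is_atomic(&ex_jiffies) ? 0 : 1;
}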
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index d9410d58c93e..3314fc29e236 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -19,10 +19,10 @@ bool kcsan_enabled;
/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
- .disable_count = 0,
- .atomic_next = 0,
- .atomic_nest_count = 0,
- .in_flat_atomic = false,
+ .disable_count = 0,
+ .atomic_next = 0,
+ .atomic_nest_count = 0,
+ .in_flat_atomic = false,
};
/*
@@ -50,11 +50,11 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
* slot=9: [10, 11, 9]
* slot=63: [64, 65, 63]
*/
-#define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT)
+#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
/*
- * SLOT_IDX_FAST is used in fast-path. Not first checking the address's primary
+ * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
* slot (middle) is fine if we assume that data races occur rarely. The set of
* indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
* {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
@@ -68,9 +68,9 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
* zero-initialized state matches INVALID_WATCHPOINT.
*
* Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
- * use more complicated SLOT_IDX_FAST calculation with modulo in fast-path.
+ * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
*/
-static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];
+static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
/*
* Instructions to skip watching counter, used in should_watch(). We use a
@@ -78,7 +78,8 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];
*/
static DEFINE_PER_CPU(long, kcsan_skip);
-static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
+static inline atomic_long_t *find_watchpoint(unsigned long addr,
+ size_t size,
bool expect_write,
long *encoded_watchpoint)
{
@@ -110,8 +111,8 @@ static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
return NULL;
}
-static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
- bool is_write)
+static inline atomic_long_t *
+insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
const int slot = watchpoint_slot(addr);
const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
@@ -120,21 +121,16 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
/* Check slot index logic, ensuring we stay within array bounds. */
BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
- BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT + 1) != 0);
- BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
- KCSAN_CHECK_ADJACENT) !=
- ARRAY_SIZE(watchpoints) - 1);
- BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
- KCSAN_CHECK_ADJACENT + 1) !=
- ARRAY_SIZE(watchpoints) - NUM_SLOTS);
+ BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
for (i = 0; i < NUM_SLOTS; ++i) {
long expect_val = INVALID_WATCHPOINT;
/* Try to acquire this slot. */
watchpoint = &watchpoints[SLOT_IDX(slot, i)];
- if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val,
- encoded_watchpoint))
+ if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
return watchpoint;
}
@@ -150,11 +146,10 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
* 2. the thread that set up the watchpoint already removed it;
* 3. the watchpoint was removed and then re-used.
*/
-static inline bool try_consume_watchpoint(atomic_long_t *watchpoint,
- long encoded_watchpoint)
+static inline bool
+try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
- return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint,
- CONSUMED_WATCHPOINT);
+ return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}
/*
@@ -162,14 +157,13 @@ static inline bool try_consume_watchpoint(atomic_long_t *watchpoint,
*/
static inline bool remove_watchpoint(atomic_long_t *watchpoint)
{
- return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) !=
- CONSUMED_WATCHPOINT;
+ return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
}
static inline struct kcsan_ctx *get_ctx(void)
{
/*
- * In interrupt, use raw_cpu_ptr to avoid unnecessary checks, that would
+ * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
* also result in calls that generate warnings in uaccess regions.
*/
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
@@ -260,7 +254,8 @@ static inline unsigned int get_delay(void)
*/
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
- size_t size, bool is_write,
+ size_t size,
+ bool is_write,
atomic_long_t *watchpoint,
long encoded_watchpoint)
{
@@ -296,8 +291,8 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
user_access_restore(flags);
}
-static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
- size_t size, bool is_write)
+static noinline void
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write)
{
atomic_long_t *watchpoint;
union {
@@ -346,8 +341,8 @@ static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
if (watchpoint == NULL) {
/*
- * Out of capacity: the size of `watchpoints`, and the frequency
- * with which `should_watch()` returns true should be tweaked so
+ * Out of capacity: the size of 'watchpoints', and the frequency
+ * with which should_watch() returns true should be tweaked so
* that this case happens very rarely.
*/
kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
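The slot-iteration orders listed in the core.c comment above (slot=0: [1, 2, 0] and so on) follow directly from SLOT_IDX(). A standalone userspace sketch reproduces them, assuming KCSAN_CHECK_ADJACENT is 1, which is what the three-entry example orders imply:

#include <stdio.h>

/* Mirrors the macros from the hunk above; KCSAN_CHECK_ADJACENT == 1 is an assumption. */
#define KCSAN_CHECK_ADJACENT 1
#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

int main(void)
{
	const int slots[] = { 0, 9, 63 };

	for (int s = 0; s < 3; ++s) {
		printf("slot=%d:", slots[s]);
		for (int i = 0; i < NUM_SLOTS; ++i)
			printf(" %d", SLOT_IDX(slots[s], i));
		printf("\n");	/* slot=0: 1 2 0   slot=9: 10 11 9   slot=63: 64 65 63 */
	}
	return 0;
}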
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 041d520a0183..bec42dab32ee 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -24,39 +24,31 @@ static atomic_long_t counters[KCSAN_COUNTER_COUNT];
* whitelist or blacklist.
*/
static struct {
- unsigned long *addrs; /* array of addresses */
- size_t size; /* current size */
- int used; /* number of elements used */
- bool sorted; /* if elements are sorted */
- bool whitelist; /* if list is a blacklist or whitelist */
+ unsigned long *addrs; /* array of addresses */
+ size_t size; /* current size */
+ int used; /* number of elements used */
+ bool sorted; /* if elements are sorted */
+ bool whitelist; /* if list is a blacklist or whitelist */
} report_filterlist = {
- .addrs = NULL,
- .size = 8, /* small initial size */
- .used = 0,
- .sorted = false,
- .whitelist = false, /* default is blacklist */
+ .addrs = NULL,
+ .size = 8, /* small initial size */
+ .used = 0,
+ .sorted = false,
+ .whitelist = false, /* default is blacklist */
};
static DEFINE_SPINLOCK(report_filterlist_lock);
static const char *counter_to_name(enum kcsan_counter_id id)
{
switch (id) {
- case KCSAN_COUNTER_USED_WATCHPOINTS:
- return "used_watchpoints";
- case KCSAN_COUNTER_SETUP_WATCHPOINTS:
- return "setup_watchpoints";
- case KCSAN_COUNTER_DATA_RACES:
- return "data_races";
- case KCSAN_COUNTER_NO_CAPACITY:
- return "no_capacity";
- case KCSAN_COUNTER_REPORT_RACES:
- return "report_races";
- case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN:
- return "races_unknown_origin";
- case KCSAN_COUNTER_UNENCODABLE_ACCESSES:
- return "unencodable_accesses";
- case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES:
- return "encoding_false_positives";
+ case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints";
+ case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints";
+ case KCSAN_COUNTER_DATA_RACES: return "data_races";
+ case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
+ case KCSAN_COUNTER_REPORT_RACES: return "report_races";
+ case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin";
+ case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses";
+ case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
case KCSAN_COUNTER_COUNT:
BUG();
}
@@ -116,7 +108,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr)
if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
return false;
- func_addr -= offset; /* get function start */
+ func_addr -= offset; /* Get function start */
spin_lock_irqsave(&report_filterlist_lock, flags);
if (report_filterlist.used == 0)
@@ -195,6 +187,7 @@ static ssize_t insert_report_filterlist(const char *func)
out:
spin_unlock_irqrestore(&report_filterlist_lock, flags);
+
return ret;
}
@@ -226,8 +219,8 @@ static int debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show_info, NULL);
}
-static ssize_t debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *off)
+static ssize_t
+debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
char kbuf[KSYM_NAME_LEN];
char *arg;
@@ -264,10 +257,13 @@ static ssize_t debugfs_write(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations debugfs_ops = { .read = seq_read,
- .open = debugfs_open,
- .write = debugfs_write,
- .release = single_release };
+static const struct file_operations debugfs_ops =
+{
+ .read = seq_read,
+ .open = debugfs_open,
+ .write = debugfs_write,
+ .release = single_release
+};
void __init kcsan_debugfs_init(void)
{
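For context on the report_filterlist structure above: kcsan_skip_report_debugfs() reduces func_addr to the function start and then consults the address list, with the whitelist flag deciding the polarity. Below is a minimal userspace sketch of that decision; the locking, dynamic resizing, and the actual search strategy are not visible in these hunks, so a plain linear scan stands in for them:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for report_filterlist: addresses plus polarity. */
struct ex_filterlist {
	const unsigned long	*addrs;		/* array of function start addresses */
	int			used;		/* number of elements used */
	bool			whitelist;	/* true: report only these; false: never report these */
};

/* Returns true if a race in the function starting at func_start should be skipped. */
static bool ex_skip_report(const struct ex_filterlist *list, unsigned long func_start)
{
	bool found = false;

	for (int i = 0; i < list->used; ++i) {
		if (list->addrs[i] == func_start) {
			found = true;
			break;
		}
	}
	/* Blacklist: skip when found. Whitelist: skip when not found. */
	return list->whitelist ? !found : found;
}

int main(void)
{
	const unsigned long addrs[] = { 0x1000, 0x2000 };
	struct ex_filterlist blacklist = { .addrs = addrs, .used = 2, .whitelist = false };

	printf("%d %d\n", ex_skip_report(&blacklist, 0x1000), ex_skip_report(&blacklist, 0x3000));
	return 0;	/* prints "1 0": listed function skipped, others reported */
}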
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index e17bdac0e54b..b63890e86449 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -10,7 +10,8 @@
#include "kcsan.h"
#define SLOT_RANGE PAGE_SIZE
-#define INVALID_WATCHPOINT 0
+
+#define INVALID_WATCHPOINT 0
#define CONSUMED_WATCHPOINT 1
/*
@@ -34,24 +35,24 @@
* Both these are assumed to be very unlikely. However, in case it still happens
* happens, the report logic will filter out the false positive (see report.c).
*/
-#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG - 1 - WATCHPOINT_SIZE_BITS)
+#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
/*
* Masks to set/retrieve the encoded data.
*/
-#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG - 1)
+#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
#define WATCHPOINT_SIZE_MASK \
- GENMASK(BITS_PER_LONG - 2, BITS_PER_LONG - 2 - WATCHPOINT_SIZE_BITS)
+ GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS)
#define WATCHPOINT_ADDR_MASK \
- GENMASK(BITS_PER_LONG - 3 - WATCHPOINT_SIZE_BITS, 0)
+ GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0)
static inline bool check_encodable(unsigned long addr, size_t size)
{
return size <= MAX_ENCODABLE_SIZE;
}
-static inline long encode_watchpoint(unsigned long addr, size_t size,
- bool is_write)
+static inline long
+encode_watchpoint(unsigned long addr, size_t size, bool is_write)
{
return (long)((is_write ? WATCHPOINT_WRITE_MASK : 0) |
(size << WATCHPOINT_ADDR_BITS) |
@@ -59,17 +60,17 @@ static inline long encode_watchpoint(unsigned long addr, size_t size,
}
static inline bool decode_watchpoint(long watchpoint,
- unsigned long *addr_masked, size_t *size,
+ unsigned long *addr_masked,
+ size_t *size,
bool *is_write)
{
if (watchpoint == INVALID_WATCHPOINT ||
watchpoint == CONSUMED_WATCHPOINT)
return false;
- *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK;
- *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >>
- WATCHPOINT_ADDR_BITS;
- *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK);
+ *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK;
+ *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> WATCHPOINT_ADDR_BITS;
+ *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK);
return true;
}
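The masks above pack a write bit, a size field, and the low address bits into a single long, which is what encode_watchpoint() and decode_watchpoint() operate on. WATCHPOINT_SIZE_BITS itself is not visible in this hunk, so the self-contained sketch below uses an assumed 4-bit size field purely to illustrate the round trip; it is not KCSAN's exact layout:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative layout only (assumes 64-bit unsigned long and a 4-bit size field). */
#define EX_WRITE_MASK	(1UL << 63)
#define EX_SIZE_BITS	4
#define EX_ADDR_BITS	(64 - 1 - EX_SIZE_BITS)
#define EX_ADDR_MASK	((1UL << EX_ADDR_BITS) - 1)

static long ex_encode(unsigned long addr, size_t size, bool is_write)
{
	return (long)((is_write ? EX_WRITE_MASK : 0) |
		      ((unsigned long)size << EX_ADDR_BITS) |
		      (addr & EX_ADDR_MASK));
}

static void ex_decode(long wp, unsigned long *addr_masked, size_t *size, bool *is_write)
{
	*addr_masked = (unsigned long)wp & EX_ADDR_MASK;
	*size        = ((unsigned long)wp >> EX_ADDR_BITS) & ((1UL << EX_SIZE_BITS) - 1);
	*is_write    = !!((unsigned long)wp & EX_WRITE_MASK);
}

int main(void)
{
	unsigned long addr_masked;
	size_t size;
	bool is_write;
	long wp = ex_encode(0xffff888012345678UL, 8, true);

	ex_decode(wp, &addr_masked, &size, &is_write);
	assert(size == 8 && is_write);
	/* Only the low EX_ADDR_BITS of the address survive the round trip. */
	printf("addr_masked=%#lx size=%zu is_write=%d\n", addr_masked, size, is_write);
	return 0;
}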
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 1bb2f1c0d61e..d3b9a96ac8a4 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -72,14 +72,14 @@ enum kcsan_counter_id {
/*
* Increment/decrement counter with given id; avoid calling these in fast-path.
*/
-void kcsan_counter_inc(enum kcsan_counter_id id);
-void kcsan_counter_dec(enum kcsan_counter_id id);
+extern void kcsan_counter_inc(enum kcsan_counter_id id);
+extern void kcsan_counter_dec(enum kcsan_counter_id id);
/*
* Returns true if data races in the function symbol that maps to func_addr
* (offsets are ignored) should *not* be reported.
*/
-bool kcsan_skip_report_debugfs(unsigned long func_addr);
+extern bool kcsan_skip_report_debugfs(unsigned long func_addr);
enum kcsan_report_type {
/*
@@ -99,10 +99,11 @@ enum kcsan_report_type {
*/
KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
};
+
/*
* Print a race report from thread that encountered the race.
*/
-void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
- bool value_change, int cpu_id, enum kcsan_report_type type);
+extern void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
+ bool value_change, int cpu_id, enum kcsan_report_type type);
#endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index ead5610bafa7..0eea05a3135b 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -22,13 +22,13 @@
* the reports, with reporting being in the slow-path.
*/
static struct {
- const volatile void *ptr;
- size_t size;
- bool is_write;
- int task_pid;
- int cpu_id;
- unsigned long stack_entries[NUM_STACK_ENTRIES];
- int num_stack_entries;
+ const volatile void *ptr;
+ size_t size;
+ bool is_write;
+ int task_pid;
+ int cpu_id;
+ unsigned long stack_entries[NUM_STACK_ENTRIES];
+ int num_stack_entries;
} other_info = { .ptr = NULL };
/*
@@ -40,8 +40,8 @@ static DEFINE_SPINLOCK(report_lock);
/*
* Special rules to skip reporting.
*/
-static bool skip_report(bool is_write, bool value_change,
- unsigned long top_frame)
+static bool
+skip_report(bool is_write, bool value_change, unsigned long top_frame)
{
if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write &&
!value_change) {
@@ -105,6 +105,7 @@ static int sym_strcmp(void *addr1, void *addr2)
snprintf(buf1, sizeof(buf1), "%pS", addr1);
snprintf(buf2, sizeof(buf2), "%pS", addr2);
+
return strncmp(buf1, buf2, sizeof(buf1));
}
@@ -116,8 +117,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
enum kcsan_report_type type)
{
unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
- int num_stack_entries =
- stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
+ int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
int other_skipnr;
@@ -131,7 +131,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
other_skipnr = get_stack_skipnr(other_info.stack_entries,
other_info.num_stack_entries);
- /* value_change is only known for the other thread */
+ /* @value_change is only known for the other thread */
if (skip_report(other_info.is_write, value_change,
other_info.stack_entries[other_skipnr]))
return false;
@@ -241,13 +241,12 @@ retry:
if (other_info.ptr != NULL)
break; /* still in use, retry */
- other_info.ptr = ptr;
- other_info.size = size;
- other_info.is_write = is_write;
- other_info.task_pid = in_task() ? task_pid_nr(current) : -1;
- other_info.cpu_id = cpu_id;
- other_info.num_stack_entries = stack_trace_save(
- other_info.stack_entries, NUM_STACK_ENTRIES, 1);
+ other_info.ptr = ptr;
+ other_info.size = size;
+ other_info.is_write = is_write;
+ other_info.task_pid = in_task() ? task_pid_nr(current) : -1;
+ other_info.cpu_id = cpu_id;
+ other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1);
spin_unlock_irqrestore(&report_lock, *flags);
@@ -299,6 +298,7 @@ retry:
}
spin_unlock_irqrestore(&report_lock, *flags);
+
goto retry;
}
@@ -309,9 +309,7 @@ void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
kcsan_disable_current();
if (prepare_report(&flags, ptr, size, is_write, cpu_id, type)) {
- if (print_report(ptr, size, is_write, value_change, cpu_id,
- type) &&
- panic_on_warn)
+ if (print_report(ptr, size, is_write, value_change, cpu_id, type) && panic_on_warn)
panic("panic_on_warn set ...\n");
release_report(&flags, type);
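The hunk for skip_report() above shows only the start of one of its rules: under CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY, a write access whose value was not observed to change enters a branch that presumably suppresses the report, subject to whatever further checks the cut-off body applies. The sketch below mocks just the visible condition:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY). */
static const bool ex_value_change_only = true;

/*
 * Mock of the condition visible in skip_report() above: under the
 * value-change-only config, flag a racing write that did not change the
 * observed value. The real function applies further rules not shown here.
 */
static bool ex_skip_report(bool is_write, bool value_change)
{
	return ex_value_change_only && is_write && !value_change;
}

int main(void)
{
	printf("%d %d\n", ex_skip_report(true, false), ex_skip_report(true, true));
	return 0;	/* prints "1 0" */
}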
diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c
index 0bae63c5ca65..cc6000239dc0 100644
--- a/kernel/kcsan/test.c
+++ b/kernel/kcsan/test.c
@@ -34,7 +34,7 @@ static bool test_encode_decode(void)
if (WARN_ON(!check_encodable(addr, size)))
return false;
- /* encode and decode */
+ /* Encode and decode */
{
const long encoded_watchpoint =
encode_watchpoint(addr, size, is_write);
@@ -42,7 +42,7 @@ static bool test_encode_decode(void)
size_t verif_size;
bool verif_is_write;
- /* check special watchpoints */
+ /* Check special watchpoints */
if (WARN_ON(decode_watchpoint(
INVALID_WATCHPOINT, &verif_masked_addr,
&verif_size, &verif_is_write)))
@@ -52,7 +52,7 @@ static bool test_encode_decode(void)
&verif_size, &verif_is_write)))
return false;
- /* check decoding watchpoint returns same data */
+ /* Check decoding watchpoint returns same data */
if (WARN_ON(!decode_watchpoint(
encoded_watchpoint, &verif_masked_addr,
&verif_size, &verif_is_write)))