Diffstat (limited to 'tools/testing')
-rwxr-xr-x  tools/testing/kunit/kunit.py  30
-rw-r--r--  tools/testing/kunit/kunit_config.py  13
-rw-r--r--  tools/testing/kunit/kunit_kernel.py  18
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py  204
-rw-r--r--  tools/testing/scatterlist/main.c  1
-rw-r--r--  tools/testing/selftests/Makefile  10
-rw-r--r--  tools/testing/selftests/arm64/mte/check_buffer_fill.c  2
-rw-r--r--  tools/testing/selftests/breakpoints/breakpoint_test_arm64.c  4
-rw-r--r--  tools/testing/selftests/dma/dma_map_benchmark.c  21
-rw-r--r--  tools/testing/selftests/dmabuf-heaps/Makefile  2
-rw-r--r--  tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c  149
-rw-r--r--  tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc  4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc  35
-rw-r--r--  tools/testing/selftests/gpio/Makefile  26
-rw-r--r--  tools/testing/selftests/gpio/config  1
-rw-r--r--  tools/testing/selftests/gpio/gpio-mockup-cdev.c  198
-rw-r--r--  tools/testing/selftests/gpio/gpio-mockup-chardev.c  323
-rwxr-xr-x  tools/testing/selftests/gpio/gpio-mockup-sysfs.sh  168
-rwxr-xr-x  tools/testing/selftests/gpio/gpio-mockup.sh  497
-rw-r--r--  tools/testing/selftests/ipc/msgque.c  6
-rwxr-xr-x  tools/testing/selftests/kselftest_deps.sh  4
-rw-r--r--  tools/testing/selftests/kselftest_harness.h  26
-rw-r--r--  tools/testing/selftests/kselftest_module.h  18
-rw-r--r--  tools/testing/selftests/kvm/.gitignore  6
-rw-r--r--  tools/testing/selftests/kvm/Makefile  6
-rw-r--r--  tools/testing/selftests/kvm/demand_paging_test.c  43
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_perf_test.c  92
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h  6
-rw-r--r--  tools/testing/selftests/kvm/include/numaif.h  55
-rw-r--r--  tools/testing/selftests/kvm/include/perf_test_util.h  7
-rw-r--r--  tools/testing/selftests/kvm/include/test_util.h  16
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h  41
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c  1
-rw-r--r--  tools/testing/selftests/kvm/lib/perf_test_util.c  31
-rw-r--r--  tools/testing/selftests/kvm/lib/test_util.c  31
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c  144
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/svm.c  8
-rw-r--r--  tools/testing/selftests/kvm/memslot_modification_stress_test.c  212
-rw-r--r--  tools/testing/selftests/kvm/settings  1
-rw-r--r--  tools/testing/selftests/kvm/x86_64/evmcs_test.c  3
-rw-r--r--  tools/testing/selftests/kvm/x86_64/get_cpuid_test.c  175
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c  31
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c  131
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c  544
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c  167
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c  149
-rw-r--r--  tools/testing/selftests/mount_setattr/.gitignore  1
-rw-r--r--  tools/testing/selftests/mount_setattr/Makefile  7
-rw-r--r--  tools/testing/selftests/mount_setattr/config  1
-rw-r--r--  tools/testing/selftests/mount_setattr/mount_setattr_test.c  1424
-rwxr-xr-x  tools/testing/selftests/powerpc/eeh/eeh-basic.sh  41
-rw-r--r-- [-rwxr-xr-x]  tools/testing/selftests/powerpc/eeh/eeh-functions.sh  168
-rwxr-xr-x  tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh  45
-rwxr-xr-x  tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh  35
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c  10
-rw-r--r--  tools/testing/selftests/timens/.gitignore  1
-rw-r--r--  tools/testing/selftests/vDSO/vdso_config.h  4
-rwxr-xr-x  tools/testing/selftests/wireguard/netns.sh  15
-rw-r--r--  tools/testing/selftests/x86/ldt_gdt.c  2
59 files changed, 4457 insertions, 957 deletions
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index e808a47c839b..d5144fcb03ac 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -28,12 +28,12 @@ KunitBuildRequest = namedtuple('KunitBuildRequest',
['jobs', 'build_dir', 'alltests',
'make_options'])
KunitExecRequest = namedtuple('KunitExecRequest',
- ['timeout', 'build_dir', 'alltests'])
+ ['timeout', 'build_dir', 'alltests', 'filter_glob'])
KunitParseRequest = namedtuple('KunitParseRequest',
['raw_output', 'input_data', 'build_dir', 'json'])
KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
- 'build_dir', 'alltests', 'json',
- 'make_options'])
+ 'build_dir', 'alltests', 'filter_glob',
+ 'json', 'make_options'])
KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
@@ -93,6 +93,7 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree,
test_start = time.time()
result = linux.run_kernel(
timeout=None if request.alltests else request.timeout,
+ filter_glob=request.filter_glob,
build_dir=request.build_dir)
test_end = time.time()
@@ -149,7 +150,7 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
return build_result
exec_request = KunitExecRequest(request.timeout, request.build_dir,
- request.alltests)
+ request.alltests, request.filter_glob)
exec_result = exec_tests(linux, exec_request)
if exec_result.status != KunitStatus.SUCCESS:
return exec_result
@@ -182,6 +183,9 @@ def add_common_opts(parser) -> None:
parser.add_argument('--alltests',
help='Run all KUnit tests through allyesconfig',
action='store_true')
+ parser.add_argument('--kunitconfig',
+ help='Path to Kconfig fragment that enables KUnit tests',
+ metavar='kunitconfig')
def add_build_opts(parser) -> None:
parser.add_argument('--jobs',
@@ -197,6 +201,14 @@ def add_exec_opts(parser) -> None:
type=int,
default=300,
metavar='timeout')
+ parser.add_argument('filter_glob',
+ help='glob for filtering which KUnit test suites/tests '
+ 'are run; the value is passed to the kernel as '
+ 'kunit.filter_glob.',
+ type=str,
+ nargs='?',
+ default='',
+ metavar='filter_glob')
def add_parse_opts(parser) -> None:
parser.add_argument('--raw_output', help='don\'t format output from kernel',
@@ -256,13 +268,14 @@ def main(argv, linux=None):
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig)
request = KunitRequest(cli_args.raw_output,
cli_args.timeout,
cli_args.jobs,
cli_args.build_dir,
cli_args.alltests,
+ cli_args.filter_glob,
cli_args.json,
cli_args.make_options)
result = run_tests(linux, request)
@@ -274,7 +287,7 @@ def main(argv, linux=None):
os.mkdir(cli_args.build_dir)
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig)
request = KunitConfigRequest(cli_args.build_dir,
cli_args.make_options)
@@ -286,7 +299,7 @@ def main(argv, linux=None):
sys.exit(1)
elif cli_args.subcommand == 'build':
if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
+ linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig)
request = KunitBuildRequest(cli_args.jobs,
cli_args.build_dir,
@@ -304,7 +317,8 @@ def main(argv, linux=None):
exec_request = KunitExecRequest(cli_args.timeout,
cli_args.build_dir,
- cli_args.alltests)
+ cli_args.alltests,
+ cli_args.filter_glob)
exec_result = exec_tests(linux, exec_request)
parse_request = KunitParseRequest(cli_args.raw_output,
exec_result.result,
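
With this change, the run and exec subcommands accept an optional positional filter glob in addition to the new --kunitconfig flag. A sketch of how an invocation might look (the glob and the fragment path below are illustrative only, not taken from the patch):

  $ ./tools/testing/kunit/kunit.py run --kunitconfig=path/to/.kunitconfig 'example*'

The glob is forwarded to the UML kernel command line as kunit.filter_glob=example*, so only matching suites are executed.
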
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index bdd60230764b..0b550cbd667d 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -41,15 +41,14 @@ class Kconfig(object):
self._entries.append(entry)
def is_subset_of(self, other: 'Kconfig') -> bool:
+ other_dict = {e.name: e.value for e in other.entries()}
for a in self.entries():
- found = False
- for b in other.entries():
- if a.name != b.name:
+ b = other_dict.get(a.name)
+ if b is None:
+ if a.value == 'n':
continue
- if a.value != b.value:
- return False
- found = True
- if a.value != 'n' and found == False:
+ return False
+ elif a.value != b:
return False
return True
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 2076a5a2d060..f309a33256cd 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -123,7 +123,7 @@ def get_outfile_path(build_dir) -> str:
class LinuxSourceTree(object):
"""Represents a Linux kernel source tree with KUnit tests."""
- def __init__(self, build_dir: str, load_config=True, defconfig=DEFAULT_KUNITCONFIG_PATH) -> None:
+ def __init__(self, build_dir: str, load_config=True, kunitconfig_path='') -> None:
signal.signal(signal.SIGINT, self.signal_handler)
self._ops = LinuxSourceTreeOperations()
@@ -131,9 +131,13 @@ class LinuxSourceTree(object):
if not load_config:
return
- kunitconfig_path = get_kunitconfig_path(build_dir)
- if not os.path.exists(kunitconfig_path):
- shutil.copyfile(defconfig, kunitconfig_path)
+ if kunitconfig_path:
+ if not os.path.exists(kunitconfig_path):
+ raise ConfigError(f'Specified kunitconfig ({kunitconfig_path}) does not exist')
+ else:
+ kunitconfig_path = get_kunitconfig_path(build_dir)
+ if not os.path.exists(kunitconfig_path):
+ shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, kunitconfig_path)
self._kconfig = kunit_config.Kconfig()
self._kconfig.read_from_file(kunitconfig_path)
@@ -199,8 +203,12 @@ class LinuxSourceTree(object):
return False
return self.validate_config(build_dir)
- def run_kernel(self, args=[], build_dir='', timeout=None) -> Iterator[str]:
+ def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]:
+ if not args:
+ args = []
args.extend(['mem=1G', 'console=tty'])
+ if filter_glob:
+ args.append('kunit.filter_glob='+filter_glob)
self._ops.linux_bin(args, timeout, build_dir)
outfile = get_outfile_path(build_dir)
subprocess.call(['stty', 'sane'])
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index b593f4448e83..1ad3049e9069 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -12,6 +12,7 @@ from unittest import mock
import tempfile, shutil # Handling test_tmpdir
import json
+import signal
import os
import kunit_config
@@ -21,16 +22,18 @@ import kunit_json
import kunit
test_tmpdir = ''
+abs_test_data_dir = ''
def setUpModule():
- global test_tmpdir
+ global test_tmpdir, abs_test_data_dir
test_tmpdir = tempfile.mkdtemp()
+ abs_test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test_data'))
def tearDownModule():
shutil.rmtree(test_tmpdir)
-def get_absolute_path(path):
- return os.path.join(os.path.dirname(__file__), path)
+def test_data_path(path):
+ return os.path.join(abs_test_data_dir, path)
class KconfigTest(unittest.TestCase):
@@ -46,8 +49,7 @@ class KconfigTest(unittest.TestCase):
def test_read_from_file(self):
kconfig = kunit_config.Kconfig()
- kconfig_path = get_absolute_path(
- 'test_data/test_read_from_file.kconfig')
+ kconfig_path = test_data_path('test_read_from_file.kconfig')
kconfig.read_from_file(kconfig_path)
@@ -98,21 +100,18 @@ class KUnitParserTest(unittest.TestCase):
str(needle) + '" not found in "' + str(haystack) + '"!')
def test_output_isolated_correctly(self):
- log_path = get_absolute_path(
- 'test_data/test_output_isolated_correctly.log')
- file = open(log_path)
- result = kunit_parser.isolate_kunit_output(file.readlines())
+ log_path = test_data_path('test_output_isolated_correctly.log')
+ with open(log_path) as file:
+ result = kunit_parser.isolate_kunit_output(file.readlines())
self.assertContains('TAP version 14', result)
self.assertContains(' # Subtest: example', result)
self.assertContains(' 1..2', result)
self.assertContains(' ok 1 - example_simple_test', result)
self.assertContains(' ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
- file.close()
def test_output_with_prefix_isolated_correctly(self):
- log_path = get_absolute_path(
- 'test_data/test_pound_sign.log')
+ log_path = test_data_path('test_pound_sign.log')
with open(log_path) as file:
result = kunit_parser.isolate_kunit_output(file.readlines())
self.assertContains('TAP version 14', result)
@@ -141,61 +140,51 @@ class KUnitParserTest(unittest.TestCase):
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
- all_passed_log = get_absolute_path(
- 'test_data/test_is_test_passed-all_passed.log')
- file = open(all_passed_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ all_passed_log = test_data_path('test_is_test_passed-all_passed.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- file.close()
def test_parse_failed_test_log(self):
- failed_log = get_absolute_path(
- 'test_data/test_is_test_passed-failure.log')
- file = open(failed_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ failed_log = test_data_path('test_is_test_passed-failure.log')
+ with open(failed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.FAILURE,
result.status)
- file.close()
def test_no_tests(self):
- empty_log = get_absolute_path(
- 'test_data/test_is_test_passed-no_tests_run.log')
- file = open(empty_log)
- result = kunit_parser.parse_run_tests(
- kunit_parser.isolate_kunit_output(file.readlines()))
+ empty_log = test_data_path('test_is_test_passed-no_tests_run.log')
+ with open(empty_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.isolate_kunit_output(file.readlines()))
self.assertEqual(0, len(result.suites))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
- file.close()
def test_no_kunit_output(self):
- crash_log = get_absolute_path(
- 'test_data/test_insufficient_memory.log')
- file = open(crash_log)
+ crash_log = test_data_path('test_insufficient_memory.log')
print_mock = mock.patch('builtins.print').start()
- result = kunit_parser.parse_run_tests(
- kunit_parser.isolate_kunit_output(file.readlines()))
+ with open(crash_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.isolate_kunit_output(file.readlines()))
print_mock.assert_any_call(StrContains('no tests run!'))
print_mock.stop()
file.close()
def test_crashed_test(self):
- crashed_log = get_absolute_path(
- 'test_data/test_is_test_passed-crash.log')
- file = open(crashed_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ crashed_log = test_data_path('test_is_test_passed-crash.log')
+ with open(crashed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
- file.close()
def test_ignores_prefix_printk_time(self):
- prefix_log = get_absolute_path(
- 'test_data/test_config_printk_time.log')
+ prefix_log = test_data_path('test_config_printk_time.log')
with open(prefix_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
@@ -204,8 +193,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_ignores_multiple_prefixes(self):
- prefix_log = get_absolute_path(
- 'test_data/test_multiple_prefixes.log')
+ prefix_log = test_data_path('test_multiple_prefixes.log')
with open(prefix_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
@@ -214,8 +202,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_prefix_mixed_kernel_output(self):
- mixed_prefix_log = get_absolute_path(
- 'test_data/test_interrupted_tap_output.log')
+ mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
with open(mixed_prefix_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
@@ -224,7 +211,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_prefix_poundsign(self):
- pound_log = get_absolute_path('test_data/test_pound_sign.log')
+ pound_log = test_data_path('test_pound_sign.log')
with open(pound_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
@@ -233,7 +220,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_kernel_panic_end(self):
- panic_log = get_absolute_path('test_data/test_kernel_panic_interrupt.log')
+ panic_log = test_data_path('test_kernel_panic_interrupt.log')
with open(panic_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
@@ -242,7 +229,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual('kunit-resource-test', result.suites[0].name)
def test_pound_no_prefix(self):
- pound_log = get_absolute_path('test_data/test_pound_no_prefix.log')
+ pound_log = test_data_path('test_pound_no_prefix.log')
with open(pound_log) as file:
result = kunit_parser.parse_run_tests(file.readlines())
self.assertEqual(
@@ -250,10 +237,27 @@ class KUnitParserTest(unittest.TestCase):
result.status)
self.assertEqual('kunit-resource-test', result.suites[0].name)
+class LinuxSourceTreeTest(unittest.TestCase):
+
+ def setUp(self):
+ mock.patch.object(signal, 'signal').start()
+ self.addCleanup(mock.patch.stopall)
+
+ def test_invalid_kunitconfig(self):
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'):
+ kunit_kernel.LinuxSourceTree('', kunitconfig_path='/nonexistent_file')
+
+ def test_valid_kunitconfig(self):
+ with tempfile.NamedTemporaryFile('wt') as kunitconfig:
+ tree = kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
+
+ # TODO: add more test cases.
+
+
class KUnitJsonTest(unittest.TestCase):
def _json_for(self, log_file):
- with(open(get_absolute_path(log_file))) as file:
+ with open(test_data_path(log_file)) as file:
test_result = kunit_parser.parse_run_tests(file)
json_obj = kunit_json.get_json_result(
test_result=test_result,
@@ -263,22 +267,19 @@ class KUnitJsonTest(unittest.TestCase):
return json.loads(json_obj)
def test_failed_test_json(self):
- result = self._json_for(
- 'test_data/test_is_test_passed-failure.log')
+ result = self._json_for('test_is_test_passed-failure.log')
self.assertEqual(
{'name': 'example_simple_test', 'status': 'FAIL'},
result["sub_groups"][1]["test_cases"][0])
def test_crashed_test_json(self):
- result = self._json_for(
- 'test_data/test_is_test_passed-crash.log')
+ result = self._json_for('test_is_test_passed-crash.log')
self.assertEqual(
{'name': 'example_simple_test', 'status': 'ERROR'},
result["sub_groups"][1]["test_cases"][0])
def test_no_tests_json(self):
- result = self._json_for(
- 'test_data/test_is_test_passed-no_tests_run.log')
+ result = self._json_for('test_is_test_passed-no_tests_run.log')
self.assertEqual(0, len(result['sub_groups']))
class StrContains(str):
@@ -287,106 +288,104 @@ class StrContains(str):
class KUnitMainTest(unittest.TestCase):
def setUp(self):
- path = get_absolute_path('test_data/test_is_test_passed-all_passed.log')
- file = open(path)
- all_passed_log = file.readlines()
- self.print_patch = mock.patch('builtins.print')
- self.print_mock = self.print_patch.start()
+ path = test_data_path('test_is_test_passed-all_passed.log')
+ with open(path) as file:
+ all_passed_log = file.readlines()
+
+ self.print_mock = mock.patch('builtins.print').start()
+ self.addCleanup(mock.patch.stopall)
+
self.linux_source_mock = mock.Mock()
self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True)
self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
- def tearDown(self):
- self.print_patch.stop()
- pass
-
def test_config_passes_args_pass(self):
kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 0
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_build_passes_args_pass(self):
kunit.main(['build'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 0
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '.kunit', None)
- assert self.linux_source_mock.run_kernel.call_count == 0
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_exec_passes_args_pass(self):
kunit.main(['exec'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 0
- assert self.linux_source_mock.run_kernel.call_count == 1
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='.kunit', timeout=300)
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
kunit.main(['run'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 1
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- build_dir='.kunit', timeout=300)
+ build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_exec_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
kunit.main(['exec'], self.linux_source_mock)
- assert type(e.exception) == SystemExit
- assert e.exception.code == 1
+ self.assertEqual(e.exception.code, 1)
def test_run_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
kunit.main(['run'], self.linux_source_mock)
- assert type(e.exception) == SystemExit
- assert e.exception.code == 1
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 1
+ self.assertEqual(e.exception.code, 1)
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.print_mock.assert_any_call(StrContains(' 0 tests run'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['exec', '--raw_output'], self.linux_source_mock)
- assert self.linux_source_mock.run_kernel.call_count == 1
- for kall in self.print_mock.call_args_list:
- assert kall != mock.call(StrContains('Testing complete.'))
- assert kall != mock.call(StrContains(' 0 tests run'))
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ for call in self.print_mock.call_args_list:
+ self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 1
- for kall in self.print_mock.call_args_list:
- assert kall != mock.call(StrContains('Testing complete.'))
- assert kall != mock.call(StrContains(' 0 tests run'))
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ for call in self.print_mock.call_args_list:
+ self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='.kunit', timeout=timeout)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ build_dir='.kunit', filter_glob='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
timeout = 3453
kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- build_dir='.kunit', timeout=timeout)
+ build_dir='.kunit', filter_glob='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_builddir(self):
build_dir = '.kunit'
kunit.main(['run', '--build_dir=.kunit'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- build_dir=build_dir, timeout=300)
+ build_dir=build_dir, filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_config_builddir(self):
build_dir = '.kunit'
kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
def test_build_builddir(self):
build_dir = '.kunit'
@@ -396,8 +395,23 @@ class KUnitMainTest(unittest.TestCase):
def test_exec_builddir(self):
build_dir = '.kunit'
kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock)
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ build_dir=build_dir, filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
+ @mock.patch.object(kunit_kernel, 'LinuxSourceTree')
+ def test_run_kunitconfig(self, mock_linux_init):
+ mock_linux_init.return_value = self.linux_source_mock
+ kunit.main(['run', '--kunitconfig=mykunitconfig'])
+ # Just verify that we parsed and initialized it correctly here.
+ mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig')
+
+ @mock.patch.object(kunit_kernel, 'LinuxSourceTree')
+ def test_config_kunitconfig(self, mock_linux_init):
+ mock_linux_init.return_value = self.linux_source_mock
+ kunit.main(['config', '--kunitconfig=mykunitconfig'])
+ # Just verify that we parsed and initialized it correctly here.
+ mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig')
+
if __name__ == '__main__':
unittest.main()
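
The rewritten tests stay self-contained and can still be run directly from the source tree to validate kunit_tool itself, e.g.:

  $ ./tools/testing/kunit/kunit_tool_test.py
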
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
index 71c960dcd8a4..652254754b4c 100644
--- a/tools/testing/scatterlist/main.c
+++ b/tools/testing/scatterlist/main.c
@@ -55,7 +55,6 @@ int main(void)
struct test *test, tests[] = {
{ -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },
{ 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },
- { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax + 1, 1 },
{ 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },
{ 0, 1, pfn(0), NULL, 1, sgmax, 1 },
{ 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index c42aacec5038..6c575cf34a71 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -33,6 +33,7 @@ TARGETS += memfd
TARGETS += memory-hotplug
TARGETS += mincore
TARGETS += mount
+TARGETS += mount_setattr
TARGETS += mqueue
TARGETS += nci
TARGETS += net
@@ -126,15 +127,6 @@ ARCH ?= $(SUBARCH)
export KSFT_KHDR_INSTALL_DONE := 1
export BUILD
-# build and run gpio when output directory is the src dir.
-# gpio has dependency on tools/gpio and builds tools/gpio
-# objects in the src directory in all cases making the src
-# repo dirty even when objects are relocated.
-ifneq (1,$(DEFAULT_INSTALL_HDR_PATH))
- TMP := $(filter-out gpio, $(TARGETS))
- TARGETS := $(TMP)
-endif
-
# set default goal to all, so make without a target runs all, even when
# all isn't the first target in the file.
.DEFAULT_GOAL := all
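
With mount_setattr added to TARGETS and the gpio exclusion removed, both suites build through the normal kselftest flow; the standard per-target invocations would be, for example:

  $ make -C tools/testing/selftests TARGETS=mount_setattr run_tests
  $ make -C tools/testing/selftests TARGETS=gpio run_tests
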
diff --git a/tools/testing/selftests/arm64/mte/check_buffer_fill.c b/tools/testing/selftests/arm64/mte/check_buffer_fill.c
index c9fa141ebdcc..75fc482d63b6 100644
--- a/tools/testing/selftests/arm64/mte/check_buffer_fill.c
+++ b/tools/testing/selftests/arm64/mte/check_buffer_fill.c
@@ -81,7 +81,7 @@ static int check_buffer_underflow_by_byte(int mem_type, int mode,
last_index = 0;
/* Set some value in tagged memory and make the buffer underflow */
for (j = sizes[i] - 1; (j >= -underflow_range) &&
- (cur_mte_cxt.fault_valid == false); j--) {
+ (!cur_mte_cxt.fault_valid); j--) {
ptr[j] = '1';
last_index = j;
}
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index ad41ea69001b..e7041816085a 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -145,7 +145,7 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
ksft_print_msg(
- "ptrace(PTRACE_SINGLESTEP) failed: %s\n",
+ "ptrace(PTRACE_CONT) failed: %s\n",
strerror(errno));
return false;
}
@@ -159,7 +159,7 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
}
alarm(0);
if (WIFEXITED(status)) {
- ksft_print_msg("child did not single-step\n");
+ ksft_print_msg("child exited prematurely\n");
return false;
}
if (!WIFSTOPPED(status)) {
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index 537d65968c48..fb23ce9617ea 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -12,9 +12,12 @@
#include <sys/mman.h>
#include <linux/types.h>
+#define NSEC_PER_MSEC 1000000L
+
#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
#define DMA_MAP_MAX_THREADS 1024
#define DMA_MAP_MAX_SECONDS 300
+#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC)
#define DMA_MAP_BIDIRECTIONAL 0
#define DMA_MAP_TO_DEVICE 1
@@ -36,7 +39,8 @@ struct map_benchmark {
__s32 node; /* which numa node this benchmark will run on */
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
- __u8 expansion[84]; /* For future use */
+ __u32 dma_trans_ns; /* time for DMA transmission in ns */
+ __u8 expansion[80]; /* For future use */
};
int main(int argc, char **argv)
@@ -46,12 +50,12 @@ int main(int argc, char **argv)
/* default single thread, run 20 seconds on NUMA_NO_NODE */
int threads = 1, seconds = 20, node = -1;
/* default dma mask 32bit, bidirectional DMA */
- int bits = 32, dir = DMA_MAP_BIDIRECTIONAL;
+ int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL;
int cmd = DMA_MAP_BENCHMARK;
char *p;
- while ((opt = getopt(argc, argv, "t:s:n:b:d:")) != -1) {
+ while ((opt = getopt(argc, argv, "t:s:n:b:d:x:")) != -1) {
switch (opt) {
case 't':
threads = atoi(optarg);
@@ -68,6 +72,9 @@ int main(int argc, char **argv)
case 'd':
dir = atoi(optarg);
break;
+ case 'x':
+ xdelay = atoi(optarg);
+ break;
default:
return -1;
}
@@ -85,6 +92,12 @@ int main(int argc, char **argv)
exit(1);
}
+ if (xdelay < 0 || xdelay > DMA_MAP_MAX_TRANS_DELAY) {
+ fprintf(stderr, "invalid transmit delay, must be in 0-%ld\n",
+ DMA_MAP_MAX_TRANS_DELAY);
+ exit(1);
+ }
+
/* suppose the minimum DMA zone is 1MB in the world */
if (bits < 20 || bits > 64) {
fprintf(stderr, "invalid dma mask bit, must be in 20-64\n");
@@ -109,6 +122,8 @@ int main(int argc, char **argv)
map.node = node;
map.dma_bits = bits;
map.dma_dir = dir;
+ map.dma_trans_ns = xdelay;
+
if (ioctl(fd, cmd, &map)) {
perror("ioctl");
exit(1);
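
The new -x option is copied into map.dma_trans_ns, letting the kernel side pretend each mapping stays busy for that many nanoseconds before it is unmapped. A hypothetical run (it is assumed a device has already been bound to the dma_map_benchmark driver):

  # 8 threads, 20 seconds, simulating 10000 ns (10 us) of DMA transmission per map
  $ ./dma_map_benchmark -t 8 -s 20 -x 10000
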
diff --git a/tools/testing/selftests/dmabuf-heaps/Makefile b/tools/testing/selftests/dmabuf-heaps/Makefile
index 607c2acd2082..604b43ece15f 100644
--- a/tools/testing/selftests/dmabuf-heaps/Makefile
+++ b/tools/testing/selftests/dmabuf-heaps/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -static -O3 -Wl,-no-as-needed -Wall -I../../../../usr/include
+CFLAGS += -static -O3 -Wl,-no-as-needed -Wall
TEST_GEN_PROGS = dmabuf-heap
diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
index 909da9cdda97..29af27acd40e 100644
--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
@@ -130,16 +130,13 @@ static int dmabuf_heap_alloc(int fd, size_t len, unsigned int flags,
dmabuf_fd);
}
-static void dmabuf_sync(int fd, int start_stop)
+static int dmabuf_sync(int fd, int start_stop)
{
struct dma_buf_sync sync = {
.flags = start_stop | DMA_BUF_SYNC_RW,
};
- int ret;
- ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
- if (ret)
- printf("sync failed %d\n", errno);
+ return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
}
#define ONE_MEG (1024 * 1024)
@@ -151,16 +148,14 @@ static int test_alloc_and_import(char *heap_name)
void *p = NULL;
int ret;
- printf("Testing heap: %s\n", heap_name);
-
heap_fd = dmabuf_heap_open(heap_name);
if (heap_fd < 0)
return -1;
- printf("Allocating 1 MEG\n");
+ printf(" Testing allocation and importing: ");
ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0, &dmabuf_fd);
if (ret) {
- printf("Allocation Failed!\n");
+ printf("FAIL (Allocation Failed!)\n");
ret = -1;
goto out;
}
@@ -172,11 +167,10 @@ static int test_alloc_and_import(char *heap_name)
dmabuf_fd,
0);
if (p == MAP_FAILED) {
- printf("mmap() failed: %m\n");
+ printf("FAIL (mmap() failed)\n");
ret = -1;
goto out;
}
- printf("mmap passed\n");
dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
memset(p, 1, ONE_MEG / 2);
@@ -186,25 +180,31 @@ static int test_alloc_and_import(char *heap_name)
importer_fd = open_vgem();
if (importer_fd < 0) {
ret = importer_fd;
- printf("Failed to open vgem\n");
- goto out;
+ printf("(Could not open vgem - skipping): ");
+ } else {
+ ret = import_vgem_fd(importer_fd, dmabuf_fd, &handle);
+ if (ret < 0) {
+ printf("FAIL (Failed to import buffer)\n");
+ goto out;
+ }
}
- ret = import_vgem_fd(importer_fd, dmabuf_fd, &handle);
+ ret = dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
if (ret < 0) {
- printf("Failed to import buffer\n");
+ printf("FAIL (DMA_BUF_SYNC_START failed!)\n");
goto out;
}
- printf("import passed\n");
- dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
memset(p, 0xff, ONE_MEG);
- dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);
- printf("syncs passed\n");
+ ret = dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);
+ if (ret < 0) {
+ printf("FAIL (DMA_BUF_SYNC_END failed!)\n");
+ goto out;
+ }
close_handle(importer_fd, handle);
ret = 0;
-
+ printf(" OK\n");
out:
if (p)
munmap(p, ONE_MEG);
@@ -218,6 +218,84 @@ out:
return ret;
}
+static int test_alloc_zeroed(char *heap_name, size_t size)
+{
+ int heap_fd = -1, dmabuf_fd[32];
+ int i, j, ret;
+ void *p = NULL;
+ char *c;
+
+ printf(" Testing alloced %ldk buffers are zeroed: ", size / 1024);
+ heap_fd = dmabuf_heap_open(heap_name);
+ if (heap_fd < 0)
+ return -1;
+
+ /* Allocate and fill a bunch of buffers */
+ for (i = 0; i < 32; i++) {
+ ret = dmabuf_heap_alloc(heap_fd, size, 0, &dmabuf_fd[i]);
+ if (ret < 0) {
+ printf("FAIL (Allocation (%i) failed)\n", i);
+ goto out;
+ }
+ /* mmap and fill with simple pattern */
+ p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd[i], 0);
+ if (p == MAP_FAILED) {
+ printf("FAIL (mmap() failed!)\n");
+ ret = -1;
+ goto out;
+ }
+ dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_START);
+ memset(p, 0xff, size);
+ dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
+ munmap(p, size);
+ }
+ /* close them all */
+ for (i = 0; i < 32; i++)
+ close(dmabuf_fd[i]);
+
+ /* Allocate and validate all buffers are zeroed */
+ for (i = 0; i < 32; i++) {
+ ret = dmabuf_heap_alloc(heap_fd, size, 0, &dmabuf_fd[i]);
+ if (ret < 0) {
+ printf("FAIL (Allocation (%i) failed)\n", i);
+ goto out;
+ }
+
+ /* mmap and validate everything is zero */
+ p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd[i], 0);
+ if (p == MAP_FAILED) {
+ printf("FAIL (mmap() failed!)\n");
+ ret = -1;
+ goto out;
+ }
+ dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_START);
+ c = (char *)p;
+ for (j = 0; j < size; j++) {
+ if (c[j] != 0) {
+ printf("FAIL (Allocated buffer not zeroed @ %i)\n", j);
+ break;
+ }
+ }
+ dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
+ munmap(p, size);
+ }
+ /* close them all */
+ for (i = 0; i < 32; i++)
+ close(dmabuf_fd[i]);
+
+ close(heap_fd);
+ printf("OK\n");
+ return 0;
+
+out:
+ while (i > 0) {
+ close(dmabuf_fd[i]);
+ i--;
+ }
+ close(heap_fd);
+ return ret;
+}
+
+/* Test the ioctl version compatibility w/ a smaller structure than expected */
static int dmabuf_heap_alloc_older(int fd, size_t len, unsigned int flags,
int *dmabuf_fd)
@@ -292,23 +370,24 @@ static int test_alloc_compat(char *heap_name)
if (heap_fd < 0)
return -1;
- printf("Testing (theoretical)older alloc compat\n");
+ printf(" Testing (theoretical)older alloc compat: ");
ret = dmabuf_heap_alloc_older(heap_fd, ONE_MEG, 0, &dmabuf_fd);
if (ret) {
- printf("Older compat allocation failed!\n");
+ printf("FAIL (Older compat allocation failed!)\n");
ret = -1;
goto out;
}
close(dmabuf_fd);
+ printf("OK\n");
- printf("Testing (theoretical)newer alloc compat\n");
+ printf(" Testing (theoretical)newer alloc compat: ");
ret = dmabuf_heap_alloc_newer(heap_fd, ONE_MEG, 0, &dmabuf_fd);
if (ret) {
- printf("Newer compat allocation failed!\n");
+ printf("FAIL (Newer compat allocation failed!)\n");
ret = -1;
goto out;
}
- printf("Ioctl compatibility tests passed\n");
+ printf("OK\n");
out:
if (dmabuf_fd >= 0)
close(dmabuf_fd);
@@ -327,17 +406,17 @@ static int test_alloc_errors(char *heap_name)
if (heap_fd < 0)
return -1;
- printf("Testing expected error cases\n");
+ printf(" Testing expected error cases: ");
ret = dmabuf_heap_alloc(0, ONE_MEG, 0x111111, &dmabuf_fd);
if (!ret) {
- printf("Did not see expected error (invalid fd)!\n");
+ printf("FAIL (Did not see expected error (invalid fd)!)\n");
ret = -1;
goto out;
}
ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0x111111, &dmabuf_fd);
if (!ret) {
- printf("Did not see expected error (invalid heap flags)!\n");
+ printf("FAIL (Did not see expected error (invalid heap flags)!)\n");
ret = -1;
goto out;
}
@@ -345,12 +424,12 @@ static int test_alloc_errors(char *heap_name)
ret = dmabuf_heap_alloc_fdflags(heap_fd, ONE_MEG,
~(O_RDWR | O_CLOEXEC), 0, &dmabuf_fd);
if (!ret) {
- printf("Did not see expected error (invalid fd flags)!\n");
+ printf("FAIL (Did not see expected error (invalid fd flags)!)\n");
ret = -1;
goto out;
}
- printf("Expected error checking passed\n");
+ printf("OK\n");
ret = 0;
out:
if (dmabuf_fd >= 0)
@@ -379,10 +458,20 @@ int main(void)
if (!strncmp(dir->d_name, "..", 3))
continue;
+ printf("Testing heap: %s\n", dir->d_name);
+ printf("=======================================\n");
ret = test_alloc_and_import(dir->d_name);
if (ret)
break;
+ ret = test_alloc_zeroed(dir->d_name, 4 * 1024);
+ if (ret)
+ break;
+
+ ret = test_alloc_zeroed(dir->d_name, ONE_MEG);
+ if (ret)
+ break;
+
ret = test_alloc_compat(dir->d_name);
if (ret)
break;
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
index 59216f3cfb12..2968cdc7df30 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
@@ -32,6 +32,10 @@ grep "myevent[[:space:]]u64 var1" synthetic_events
# it is not possible to add same name event
! echo "myevent u64 var2" >> synthetic_events
+# make sure !synthetic event doesn't require a field
+echo "!myevent" >> synthetic_events
+echo "myevent u64 var1" >> synthetic_events
+
# Non-append open will cleanup all events and add new one
echo "myevent u64 var2" > synthetic_events
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
index ada594fe16cb..955e3ceea44b 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
@@ -1,19 +1,38 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic_events syntax parser errors
-# requires: synthetic_events error_log
+# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
check_error() { # command-with-error-pos-by-^
ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
}
+check_dyn_error() { # command-with-error-pos-by-^
+ ftrace_errlog_check 'synthetic_events' "$1" 'dynamic_events'
+}
+
check_error 'myevent ^chr arg' # INVALID_TYPE
-check_error 'myevent ^char str[];; int v' # INVALID_TYPE
-check_error 'myevent char ^str]; int v' # INVALID_NAME
-check_error 'myevent char ^str;[]' # INVALID_NAME
-check_error 'myevent ^char str[; int v' # INVALID_TYPE
-check_error '^mye;vent char str[]' # BAD_NAME
-check_error 'myevent char str[]; ^int' # INVALID_FIELD
-check_error '^myevent' # INCOMPLETE_CMD
+check_error 'myevent ^unsigned arg' # INCOMPLETE_TYPE
+
+check_error 'myevent char ^str]; int v' # BAD_NAME
+check_error '^mye-vent char str[]' # BAD_NAME
+check_error 'myevent char ^st-r[]' # BAD_NAME
+
+check_error 'myevent char str;^[]' # INVALID_FIELD
+check_error 'myevent char str; ^int' # INVALID_FIELD
+
+check_error 'myevent char ^str[; int v' # INVALID_ARRAY_SPEC
+check_error 'myevent char ^str[kdjdk]' # INVALID_ARRAY_SPEC
+check_error 'myevent char ^str[257]' # INVALID_ARRAY_SPEC
+
+check_error '^mye;vent char str[]' # INVALID_CMD
+check_error '^myevent ; char str[]' # INVALID_CMD
+check_error '^myevent; char str[]' # INVALID_CMD
+check_error '^myevent ;char str[]' # INVALID_CMD
+check_error '^; char str[]' # INVALID_CMD
+check_error '^;myevent char str[]' # INVALID_CMD
+check_error '^myevent' # INVALID_CMD
+
+check_dyn_error '^s:junk/myevent char str[' # INVALID_DYN_CMD
exit 0
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 41582fe485ee..39f2bbe8dd3d 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -1,31 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-VAR_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null)
-VAR_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
-ifeq ($(VAR_LDLIBS),)
-VAR_LDLIBS := -lmount -I/usr/include/libmount
-endif
-
-CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(VAR_CFLAGS)
-LDLIBS += $(VAR_LDLIBS)
-
TEST_PROGS := gpio-mockup.sh
TEST_FILES := gpio-mockup-sysfs.sh
-TEST_GEN_PROGS_EXTENDED := gpio-mockup-chardev
+TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev
-KSFT_KHDR_INSTALL := 1
include ../lib.mk
-
-GPIODIR := $(realpath ../../../gpio)
-GPIOOUT := $(OUTPUT)/tools-gpio/
-GPIOOBJ := $(GPIOOUT)/gpio-utils.o
-
-CLEAN += ; $(RM) -rf $(GPIOOUT)
-
-$(TEST_GEN_PROGS_EXTENDED): $(GPIOOBJ)
-
-$(GPIOOUT):
- mkdir -p $@
-
-$(GPIOOBJ): $(GPIOOUT)
- $(MAKE) OUTPUT=$(GPIOOUT) -C $(GPIODIR)
diff --git a/tools/testing/selftests/gpio/config b/tools/testing/selftests/gpio/config
index abaa6902b7b6..ce100342c20b 100644
--- a/tools/testing/selftests/gpio/config
+++ b/tools/testing/selftests/gpio/config
@@ -1,2 +1,3 @@
CONFIG_GPIOLIB=y
+CONFIG_GPIO_CDEV=y
CONFIG_GPIO_MOCKUP=m
diff --git a/tools/testing/selftests/gpio/gpio-mockup-cdev.c b/tools/testing/selftests/gpio/gpio-mockup-cdev.c
new file mode 100644
index 000000000000..e83eac71621a
--- /dev/null
+++ b/tools/testing/selftests/gpio/gpio-mockup-cdev.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPIO mockup cdev test helper
+ *
+ * Copyright (C) 2020 Kent Gibson
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/gpio.h>
+
+#define CONSUMER "gpio-mockup-cdev"
+
+static int request_line_v2(int cfd, unsigned int offset,
+ uint64_t flags, unsigned int val)
+{
+ struct gpio_v2_line_request req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ req.num_lines = 1;
+ req.offsets[0] = offset;
+ req.config.flags = flags;
+ strcpy(req.consumer, CONSUMER);
+ if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
+ req.config.num_attrs = 1;
+ req.config.attrs[0].mask = 1;
+ req.config.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES;
+ if (val)
+ req.config.attrs[0].attr.values = 1;
+ }
+ ret = ioctl(cfd, GPIO_V2_GET_LINE_IOCTL, &req);
+ if (ret == -1)
+ return -errno;
+ return req.fd;
+}
+
+
+static int get_value_v2(int lfd)
+{
+ struct gpio_v2_line_values vals;
+ int ret;
+
+ memset(&vals, 0, sizeof(vals));
+ vals.mask = 1;
+ ret = ioctl(lfd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
+ if (ret == -1)
+ return -errno;
+ return vals.bits & 0x1;
+}
+
+static int request_line_v1(int cfd, unsigned int offset,
+ uint32_t flags, unsigned int val)
+{
+ struct gpiohandle_request req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ req.lines = 1;
+ req.lineoffsets[0] = offset;
+ req.flags = flags;
+ strcpy(req.consumer_label, CONSUMER);
+ if (flags & GPIOHANDLE_REQUEST_OUTPUT)
+ req.default_values[0] = val;
+
+ ret = ioctl(cfd, GPIO_GET_LINEHANDLE_IOCTL, &req);
+ if (ret == -1)
+ return -errno;
+ return req.fd;
+}
+
+static int get_value_v1(int lfd)
+{
+ struct gpiohandle_data vals;
+ int ret;
+
+ memset(&vals, 0, sizeof(vals));
+ ret = ioctl(lfd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &vals);
+ if (ret == -1)
+ return -errno;
+ return vals.values[0];
+}
+
+static void usage(char *prog)
+{
+ printf("Usage: %s [-l] [-b <bias>] [-s <value>] [-u <uAPI>] <gpiochip> <offset>\n", prog);
+ printf(" -b: set line bias to one of pull-down, pull-up, disabled\n");
+ printf(" (default is to leave bias unchanged):\n");
+ printf(" -l: set line active low (default is active high)\n");
+ printf(" -s: set line value (default is to get line value)\n");
+ printf(" -u: uAPI version to use (default is 2)\n");
+ exit(-1);
+}
+
+static int wait_signal(void)
+{
+ int sig;
+ sigset_t wset;
+
+ sigemptyset(&wset);
+ sigaddset(&wset, SIGHUP);
+ sigaddset(&wset, SIGINT);
+ sigaddset(&wset, SIGTERM);
+ sigwait(&wset, &sig);
+
+ return sig;
+}
+
+int main(int argc, char *argv[])
+{
+ char *chip;
+ int opt, ret, cfd, lfd;
+ unsigned int offset, val, abiv;
+ uint32_t flags_v1;
+ uint64_t flags_v2;
+
+ abiv = 2;
+ ret = 0;
+ flags_v1 = GPIOHANDLE_REQUEST_INPUT;
+ flags_v2 = GPIO_V2_LINE_FLAG_INPUT;
+
+ while ((opt = getopt(argc, argv, "lb:s:u:")) != -1) {
+ switch (opt) {
+ case 'l':
+ flags_v1 |= GPIOHANDLE_REQUEST_ACTIVE_LOW;
+ flags_v2 |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
+ break;
+ case 'b':
+ if (strcmp("pull-up", optarg) == 0) {
+ flags_v1 |= GPIOHANDLE_REQUEST_BIAS_PULL_UP;
+ flags_v2 |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
+ } else if (strcmp("pull-down", optarg) == 0) {
+ flags_v1 |= GPIOHANDLE_REQUEST_BIAS_PULL_DOWN;
+ flags_v2 |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
+ } else if (strcmp("disabled", optarg) == 0) {
+ flags_v1 |= GPIOHANDLE_REQUEST_BIAS_DISABLE;
+ flags_v2 |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
+ }
+ break;
+ case 's':
+ val = atoi(optarg);
+ flags_v1 &= ~GPIOHANDLE_REQUEST_INPUT;
+ flags_v1 |= GPIOHANDLE_REQUEST_OUTPUT;
+ flags_v2 &= ~GPIO_V2_LINE_FLAG_INPUT;
+ flags_v2 |= GPIO_V2_LINE_FLAG_OUTPUT;
+ break;
+ case 'u':
+ abiv = atoi(optarg);
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+
+ if (argc < optind + 2)
+ usage(argv[0]);
+
+ chip = argv[optind];
+ offset = atoi(argv[optind + 1]);
+
+ cfd = open(chip, 0);
+ if (cfd == -1) {
+ fprintf(stderr, "Failed to open %s: %s\n", chip, strerror(errno));
+ return -errno;
+ }
+
+ if (abiv == 1)
+ lfd = request_line_v1(cfd, offset, flags_v1, val);
+ else
+ lfd = request_line_v2(cfd, offset, flags_v2, val);
+
+ close(cfd);
+
+ if (lfd < 0) {
+ fprintf(stderr, "Failed to request %s:%d: %s\n", chip, offset, strerror(-lfd));
+ return lfd;
+ }
+
+ if (flags_v2 & GPIO_V2_LINE_FLAG_OUTPUT) {
+ wait_signal();
+ } else {
+ if (abiv == 1)
+ ret = get_value_v1(lfd);
+ else
+ ret = get_value_v2(lfd);
+ }
+
+ close(lfd);
+
+ return ret;
+}
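
gpio-mockup-cdev is normally driven by gpio-mockup.sh, but it can also be exercised by hand; the chip path and offset below are only examples. For an input line the value is reported via the exit status, while for an output line the helper holds the requested value until it receives SIGHUP, SIGINT or SIGTERM:

  # read offset 3 via the v2 uAPI (the default)
  $ ./gpio-mockup-cdev /dev/gpiochip0 3; echo $?
  # drive offset 3 high until the background helper is signalled
  $ ./gpio-mockup-cdev -s 1 /dev/gpiochip0 3 &
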
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
deleted file mode 100644
index 73ead8828d3a..000000000000
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * GPIO chardev test helper
- *
- * Copyright (C) 2016 Bamvor Jian Zhang
- */
-
-#define _GNU_SOURCE
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <string.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <sys/ioctl.h>
-#include <libmount.h>
-#include <err.h>
-#include <dirent.h>
-#include <linux/gpio.h>
-#include "../../../gpio/gpio-utils.h"
-
-#define CONSUMER "gpio-selftest"
-#define GC_NUM 10
-enum direction {
- OUT,
- IN
-};
-
-static int get_debugfs(char **path)
-{
- struct libmnt_context *cxt;
- struct libmnt_table *tb;
- struct libmnt_iter *itr = NULL;
- struct libmnt_fs *fs;
- int found = 0, ret;
-
- cxt = mnt_new_context();
- if (!cxt)
- err(EXIT_FAILURE, "libmount context allocation failed");
-
- itr = mnt_new_iter(MNT_ITER_FORWARD);
- if (!itr)
- err(EXIT_FAILURE, "failed to initialize libmount iterator");
-
- if (mnt_context_get_mtab(cxt, &tb))
- err(EXIT_FAILURE, "failed to read mtab");
-
- while (mnt_table_next_fs(tb, itr, &fs) == 0) {
- const char *type = mnt_fs_get_fstype(fs);
-
- if (!strcmp(type, "debugfs")) {
- found = 1;
- break;
- }
- }
- if (found) {
- ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
- if (ret < 0)
- err(EXIT_FAILURE, "failed to format string");
- }
-
- mnt_free_iter(itr);
- mnt_free_context(cxt);
-
- if (!found)
- return -1;
-
- return 0;
-}
-
-static int gpio_debugfs_get(const char *consumer, int *dir, int *value)
-{
- char *debugfs;
- FILE *f;
- char *line = NULL;
- size_t len = 0;
- char *cur;
- int found = 0;
-
- if (get_debugfs(&debugfs) != 0)
- err(EXIT_FAILURE, "debugfs is not mounted");
-
- f = fopen(debugfs, "r");
- if (!f)
- err(EXIT_FAILURE, "read from gpio debugfs failed");
-
- /*
- * gpio-2 ( |gpio-selftest ) in lo
- */
- while (getline(&line, &len, f) != -1) {
- cur = strstr(line, consumer);
- if (cur == NULL)
- continue;
-
- cur = strchr(line, ')');
- if (!cur)
- continue;
-
- cur += 2;
- if (!strncmp(cur, "out", 3)) {
- *dir = OUT;
- cur += 4;
- } else if (!strncmp(cur, "in", 2)) {
- *dir = IN;
- cur += 4;
- }
-
- if (!strncmp(cur, "hi", 2))
- *value = 1;
- else if (!strncmp(cur, "lo", 2))
- *value = 0;
-
- found = 1;
- break;
- }
- free(debugfs);
- fclose(f);
- free(line);
-
- if (!found)
- return -1;
-
- return 0;
-}
-
-static struct gpiochip_info *list_gpiochip(const char *gpiochip_name, int *ret)
-{
- struct gpiochip_info *cinfo;
- struct gpiochip_info *current;
- const struct dirent *ent;
- DIR *dp;
- char *chrdev_name;
- int fd;
- int i = 0;
-
- cinfo = calloc(sizeof(struct gpiochip_info) * 4, GC_NUM + 1);
- if (!cinfo)
- err(EXIT_FAILURE, "gpiochip_info allocation failed");
-
- current = cinfo;
- dp = opendir("/dev");
- if (!dp) {
- *ret = -errno;
- goto error_out;
- } else {
- *ret = 0;
- }
-
- while (ent = readdir(dp), ent) {
- if (check_prefix(ent->d_name, "gpiochip")) {
- *ret = asprintf(&chrdev_name, "/dev/%s", ent->d_name);
- if (*ret < 0)
- goto error_out;
-
- fd = open(chrdev_name, 0);
- if (fd == -1) {
- *ret = -errno;
- fprintf(stderr, "Failed to open %s\n",
- chrdev_name);
- goto error_close_dir;
- }
- *ret = ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, current);
- if (*ret == -1) {
- perror("Failed to issue CHIPINFO IOCTL\n");
- goto error_close_dir;
- }
- close(fd);
- if (strcmp(current->label, gpiochip_name) == 0
- || check_prefix(current->label, gpiochip_name)) {
- *ret = 0;
- current++;
- i++;
- }
- }
- }
-
- if ((!*ret && i == 0) || *ret < 0) {
- free(cinfo);
- cinfo = NULL;
- }
- if (!*ret && i > 0) {
- cinfo = realloc(cinfo, sizeof(struct gpiochip_info) * 4 * i);
- *ret = i;
- }
-
-error_close_dir:
- closedir(dp);
-error_out:
- if (*ret < 0)
- err(EXIT_FAILURE, "list gpiochip failed: %s", strerror(*ret));
-
- return cinfo;
-}
-
-int gpio_pin_test(struct gpiochip_info *cinfo, int line, int flag, int value)
-{
- struct gpiohandle_data data;
- unsigned int lines[] = {line};
- int fd;
- int debugfs_dir = IN;
- int debugfs_value = 0;
- int ret;
-
- data.values[0] = value;
- ret = gpiotools_request_linehandle(cinfo->name, lines, 1, flag, &data,
- CONSUMER);
- if (ret < 0)
- goto fail_out;
- else
- fd = ret;
-
- ret = gpio_debugfs_get(CONSUMER, &debugfs_dir, &debugfs_value);
- if (ret) {
- ret = -EINVAL;
- goto fail_out;
- }
- if (flag & GPIOHANDLE_REQUEST_INPUT) {
- if (debugfs_dir != IN) {
- errno = -EINVAL;
- ret = -errno;
- }
- } else if (flag & GPIOHANDLE_REQUEST_OUTPUT) {
- if (flag & GPIOHANDLE_REQUEST_ACTIVE_LOW)
- debugfs_value = !debugfs_value;
-
- if (!(debugfs_dir == OUT && value == debugfs_value)) {
- errno = -EINVAL;
- ret = -errno;
- }
- }
- gpiotools_release_linehandle(fd);
-
-fail_out:
- if (ret)
- err(EXIT_FAILURE, "gpio<%s> line<%d> test flag<0x%x> value<%d>",
- cinfo->name, line, flag, value);
-
- return ret;
-}
-
-void gpio_pin_tests(struct gpiochip_info *cinfo, unsigned int line)
-{
- printf("line<%d>", line);
- gpio_pin_test(cinfo, line, GPIOHANDLE_REQUEST_OUTPUT, 0);
- printf(".");
- gpio_pin_test(cinfo, line, GPIOHANDLE_REQUEST_OUTPUT, 1);
- printf(".");
- gpio_pin_test(cinfo, line,
- GPIOHANDLE_REQUEST_OUTPUT | GPIOHANDLE_REQUEST_ACTIVE_LOW,
- 0);
- printf(".");
- gpio_pin_test(cinfo, line,
- GPIOHANDLE_REQUEST_OUTPUT | GPIOHANDLE_REQUEST_ACTIVE_LOW,
- 1);
- printf(".");
- gpio_pin_test(cinfo, line, GPIOHANDLE_REQUEST_INPUT, 0);
- printf(".");
-}
-
-/*
- * ./gpio-mockup-chardev gpio_chip_name_prefix is_valid_gpio_chip
- * Return 0 if successful or exit with EXIT_FAILURE if test failed.
- * gpio_chip_name_prefix: The prefix of gpiochip you want to test. E.g.
- * gpio-mockup
- * is_valid_gpio_chip: Whether the gpio_chip is valid. 1 means valid,
- * 0 means invalid which could not be found by
- * list_gpiochip.
- */
-int main(int argc, char *argv[])
-{
- char *prefix;
- int valid;
- struct gpiochip_info *cinfo;
- struct gpiochip_info *current;
- int i;
- int ret;
-
- if (argc < 3) {
- printf("Usage: %s prefix is_valid", argv[0]);
- exit(EXIT_FAILURE);
- }
-
- prefix = argv[1];
- valid = strcmp(argv[2], "true") == 0 ? 1 : 0;
-
- printf("Test gpiochip %s: ", prefix);
- cinfo = list_gpiochip(prefix, &ret);
- if (!cinfo) {
- if (!valid && ret == 0) {
- printf("Invalid test successful\n");
- ret = 0;
- goto out;
- } else {
- ret = -EINVAL;
- goto out;
- }
- } else if (cinfo && !valid) {
- ret = -EINVAL;
- goto out;
- }
- current = cinfo;
- for (i = 0; i < ret; i++) {
- gpio_pin_tests(current, 0);
- gpio_pin_tests(current, current->lines - 1);
- gpio_pin_tests(current, random() % current->lines);
- current++;
- }
- ret = 0;
- printf("successful\n");
-
-out:
- if (ret)
- fprintf(stderr, "gpio<%s> test failed\n", prefix);
-
- if (cinfo)
- free(cinfo);
-
- if (ret)
- exit(EXIT_FAILURE);
-
- return ret;
-}
diff --git a/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh b/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh
index dd269d877562..2d2e5d8763b6 100755
--- a/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh
+++ b/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh
@@ -1,135 +1,77 @@
# SPDX-License-Identifier: GPL-2.0
-is_consistent()
-{
- val=
-
- active_low_sysfs=`cat $GPIO_SYSFS/gpio$nr/active_low`
- val_sysfs=`cat $GPIO_SYSFS/gpio$nr/value`
- dir_sysfs=`cat $GPIO_SYSFS/gpio$nr/direction`
- gpio_this_debugfs=`cat $GPIO_DEBUGFS |grep "gpio-$nr" | sed "s/(.*)//g"`
- dir_debugfs=`echo $gpio_this_debugfs | awk '{print $2}'`
- val_debugfs=`echo $gpio_this_debugfs | awk '{print $3}'`
- if [ $val_debugfs = "lo" ]; then
- val=0
- elif [ $val_debugfs = "hi" ]; then
- val=1
- fi
+# Overrides functions in gpio-mockup.sh to test using the GPIO SYSFS uAPI
- if [ $active_low_sysfs = "1" ]; then
- if [ $val = "0" ]; then
- val="1"
- else
- val="0"
- fi
- fi
+SYSFS=`grep -w sysfs /proc/mounts | cut -f2 -d' '`
+[ -d "$SYSFS" ] || skip "sysfs is not mounted"
- if [ $val_sysfs = $val ] && [ $dir_sysfs = $dir_debugfs ]; then
- echo -n "."
- else
- echo "test fail, exit"
- die
- fi
-}
+GPIO_SYSFS="${SYSFS}/class/gpio"
+[ -d "$GPIO_SYSFS" ] || skip "CONFIG_GPIO_SYSFS is not selected"
-test_pin_logic()
-{
- nr=$1
- direction=$2
- active_low=$3
- value=$4
+PLATFORM_SYSFS=$SYSFS/devices/platform
- echo $direction > $GPIO_SYSFS/gpio$nr/direction
- echo $active_low > $GPIO_SYSFS/gpio$nr/active_low
- if [ $direction = "out" ]; then
- echo $value > $GPIO_SYSFS/gpio$nr/value
- fi
- is_consistent $nr
-}
+sysfs_nr=
+sysfs_ldir=
-test_one_pin()
+# determine the sysfs GPIO number given the $chip and $offset
+# e.g. gpiochip1:32
+find_sysfs_nr()
{
- nr=$1
-
- echo -n "test pin<$nr>"
-
- echo $nr > $GPIO_SYSFS/export 2>/dev/null
-
- if [ X$? != X0 ]; then
- echo "test GPIO pin $nr failed"
- die
- fi
-
- #"Checking if the sysfs is consistent with debugfs: "
- is_consistent $nr
-
- #"Checking the logic of active_low: "
- test_pin_logic $nr out 1 1
- test_pin_logic $nr out 1 0
- test_pin_logic $nr out 0 1
- test_pin_logic $nr out 0 0
-
- #"Checking the logic of direction: "
- test_pin_logic $nr in 1 1
- test_pin_logic $nr out 1 0
- test_pin_logic $nr low 0 1
- test_pin_logic $nr high 0 0
-
- echo $nr > $GPIO_SYSFS/unexport
-
- echo "successful"
+ # e.g. /sys/devices/platform/gpio-mockup.1/gpiochip1
+ local platform=$(find $PLATFORM_SYSFS -mindepth 2 -maxdepth 2 -type d -name $chip)
+ [ "$platform" ] || fail "can't find platform of $chip"
+ # e.g. /sys/devices/platform/gpio-mockup.1/gpio/gpiochip508/base
+ local base=$(find ${platform%/*}/gpio/ -mindepth 2 -maxdepth 2 -type f -name base)
+ [ "$base" ] || fail "can't find base of $chip"
+ sysfs_nr=$(($(< "$base") + $offset))
+ sysfs_ldir="$GPIO_SYSFS/gpio$sysfs_nr"
}
-test_one_pin_fail()
+acquire_line()
{
- nr=$1
-
- echo $nr > $GPIO_SYSFS/export 2>/dev/null
-
- if [ X$? != X0 ]; then
- echo "test invalid pin $nr successful"
- else
- echo "test invalid pin $nr failed"
- echo $nr > $GPIO_SYSFS/unexport 2>/dev/null
- die
- fi
+ [ "$sysfs_nr" ] && return
+ find_sysfs_nr
+ echo "$sysfs_nr" > "$GPIO_SYSFS/export"
}
-list_chip()
+# The helpers being overridden...
+get_line()
{
- echo `ls -d $GPIO_DRV_SYSFS/gpiochip* 2>/dev/null`
+ [ -e "$sysfs_ldir/value" ] && echo $(< "$sysfs_ldir/value")
}
-test_chip()
+set_line()
{
- chip=$1
- name=`basename $chip`
- base=`cat $chip/base`
- ngpio=`cat $chip/ngpio`
- printf "%-10s %-5s %-5s\n" $name $base $ngpio
- if [ $ngpio = "0" ]; then
- echo "number of gpio is zero is not allowed".
- fi
- test_one_pin $base
- test_one_pin $(($base + $ngpio - 1))
- test_one_pin $((( RANDOM % $ngpio ) + $base ))
+ acquire_line
+
+ for option in $*; do
+ case $option in
+ active-high)
+ echo 0 > "$sysfs_ldir/active_low"
+ ;;
+ active-low)
+ echo 1 > "$sysfs_ldir/active_low"
+ ;;
+ input)
+ echo "in" > "$sysfs_ldir/direction"
+ ;;
+ 0)
+ echo "out" > "$sysfs_ldir/direction"
+ echo 0 > "$sysfs_ldir/value"
+ ;;
+ 1)
+ echo "out" > "$sysfs_ldir/direction"
+ echo 1 > "$sysfs_ldir/value"
+ ;;
+ esac
+ done
}
-test_chips_sysfs()
+release_line()
{
- gpiochip=`list_chip $module`
- if [ X"$gpiochip" = X ]; then
- if [ X"$valid" = Xfalse ]; then
- echo "successful"
- else
- echo "fail"
- die
- fi
- else
- for chip in $gpiochip; do
- test_chip $chip
- done
- fi
+ [ "$sysfs_nr" ] || return 0
+ echo "$sysfs_nr" > "$GPIO_SYSFS/unexport"
+ sysfs_nr=
+ sysfs_ldir=
}
-
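For context: the helpers above (find_sysfs_nr, acquire_line, get_line, set_line, release_line) sit on top of the deprecated /sys/class/gpio ABI and address lines by the chip's sysfs base number plus the line offset. A minimal C sketch of the same export/configure/read sequence, shown only to illustrate the ABI the script exercises; it is not part of this patch and error handling is reduced to early returns:

#include <stdio.h>

/* Export global GPIO number 'nr', set it to input and read its value. */
static int sysfs_read_gpio(int nr)
{
        char path[64];
        FILE *f;
        int val = -1;

        f = fopen("/sys/class/gpio/export", "w");
        if (!f)
                return -1;
        fprintf(f, "%d", nr);
        fclose(f);

        snprintf(path, sizeof(path), "/sys/class/gpio/gpio%d/direction", nr);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs("in", f);
        fclose(f);

        snprintf(path, sizeof(path), "/sys/class/gpio/gpio%d/value", nr);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}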
diff --git a/tools/testing/selftests/gpio/gpio-mockup.sh b/tools/testing/selftests/gpio/gpio-mockup.sh
index 7f35b9880485..0d6c5f7f95d2 100755
--- a/tools/testing/selftests/gpio/gpio-mockup.sh
+++ b/tools/testing/selftests/gpio/gpio-mockup.sh
@@ -1,72 +1,57 @@
-#!/bin/bash
+#!/bin/bash -efu
# SPDX-License-Identifier: GPL-2.0
#exit status
-#1: Internal error
-#2: sysfs/debugfs not mount
-#3: insert module fail when gpio-mockup is a module.
-#4: Skip test including run as non-root user.
-#5: other reason.
-
-SYSFS=
-GPIO_SYSFS=
-GPIO_DRV_SYSFS=
+#0: success
+#1: fail
+#4: skip test - including run as non-root user
+
+BASE=${0%/*}
DEBUGFS=
GPIO_DEBUGFS=
-dev_type=
-module=
+dev_type="cdev"
+module="gpio-mockup"
+verbose=
+full_test=
+random=
+uapi_opt=
+active_opt=
+bias_opt=
+line_set_pid=
-# Kselftest framework requirement - SKIP code is 4.
+# Kselftest return codes
+ksft_fail=1
ksft_skip=4
usage()
{
echo "Usage:"
- echo "$0 [-f] [-m name] [-t type]"
- echo "-f: full test. It maybe conflict with existence gpio device."
- echo "-m: module name, default name is gpio-mockup. It could also test"
- echo " other gpio device."
- echo "-t: interface type: chardev(char device) and sysfs(being"
- echo " deprecated). The first one is default"
- echo ""
- echo "$0 -h"
- echo "This usage"
+ echo "$0 [-frv] [-t type]"
+ echo "-f: full test (minimal set run by default)"
+ echo "-r: test random lines as well as fence posts"
+ echo "-t: interface type:"
+ echo " cdev (character device ABI) - default"
+ echo " cdev_v1 (deprecated character device ABI)"
+ echo " sysfs (deprecated SYSFS ABI)"
+ echo "-v: verbose progress reporting"
+ exit $ksft_fail
}
-prerequisite()
+skip()
{
- msg="skip all tests:"
- if [ $UID != 0 ]; then
- echo $msg must be run as root >&2
- exit $ksft_skip
- fi
- SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
- if [ ! -d "$SYSFS" ]; then
- echo $msg sysfs is not mounted >&2
- exit 2
- fi
- GPIO_SYSFS=`echo $SYSFS/class/gpio`
- GPIO_DRV_SYSFS=`echo $SYSFS/devices/platform/$module/gpio`
- DEBUGFS=`mount -t debugfs | head -1 | awk '{ print $3 }'`
- if [ ! -d "$DEBUGFS" ]; then
- echo $msg debugfs is not mounted >&2
- exit 2
- fi
- GPIO_DEBUGFS=`echo $DEBUGFS/gpio`
- source gpio-mockup-sysfs.sh
+ echo "$*" >&2
+ echo "GPIO $module test SKIP"
+ exit $ksft_skip
}
-try_insert_module()
+prerequisite()
{
- if [ -d "$GPIO_DRV_SYSFS" ]; then
- echo "$GPIO_DRV_SYSFS exist. Skip insert module"
- else
- modprobe -q $module $1
- if [ X$? != X0 ]; then
- echo $msg insmod $module failed >&2
- exit 3
- fi
- fi
+ [ $(id -u) -eq 0 ] || skip "must be run as root"
+
+ DEBUGFS=$(grep -w debugfs /proc/mounts | cut -f2 -d' ')
+ [ -d "$DEBUGFS" ] || skip "debugfs is not mounted"
+
+ GPIO_DEBUGFS=$DEBUGFS/$module
}
remove_module()
@@ -74,133 +59,345 @@ remove_module()
modprobe -r -q $module
}
-die()
+cleanup()
{
+ set +e
+ release_line
remove_module
- exit 5
+ jobs -p | xargs -r kill > /dev/null 2>&1
}
-test_chips()
+fail()
{
- if [ X$dev_type = Xsysfs ]; then
- echo "WARNING: sysfs ABI of gpio is going to deprecated."
- test_chips_sysfs $*
- else
- $BASE/gpio-mockup-chardev $*
- fi
+ echo "test failed: $*" >&2
+ echo "GPIO $module test FAIL"
+ exit $ksft_fail
+}
+
+try_insert_module()
+{
+ modprobe -q $module "$1" || fail "insert $module failed with error $?"
+}
+
+log()
+{
+ [ -z "$verbose" ] || echo "$*"
}
-gpio_test()
+# The following line helpers, release_line, get_line and set_line, all
+# make use of the global $chip and $offset variables.
+#
+# This implementation drives the GPIO character device (cdev) uAPI.
+# Other implementations may override these to test different uAPIs.
+
+# Release any resources related to the line
+release_line()
{
- param=$1
- valid=$2
+ [ "$line_set_pid" ] && kill $line_set_pid && wait $line_set_pid || true
+ line_set_pid=
+}
- if [ X"$param" = X ]; then
- die
+# Read the current value of the line
+get_line()
+{
+ release_line
+
+ local cdev_opts=${uapi_opt}${active_opt}
+ $BASE/gpio-mockup-cdev $cdev_opts /dev/$chip $offset
+ echo $?
+}
+
+# Set the state of the line
+#
+# Changes to line configuration are provided as parameters.
+# The line is assumed to be an output if the line value 0 or 1 is
+# specified, else an input.
+set_line()
+{
+ local val=
+
+ release_line
+
+ # parse config options...
+ for option in $*; do
+ case $option in
+ active-low)
+ active_opt="-l "
+ ;;
+ active-high)
+ active_opt=
+ ;;
+ bias-none)
+ bias_opt=
+ ;;
+ pull-down)
+ bias_opt="-bpull-down "
+ ;;
+ pull-up)
+ bias_opt="-bpull-up "
+ ;;
+ 0)
+ val=0
+ ;;
+ 1)
+ val=1
+ ;;
+ esac
+ done
+
+ local cdev_opts=${uapi_opt}${active_opt}
+ if [ "$val" ]; then
+ $BASE/gpio-mockup-cdev $cdev_opts -s$val /dev/$chip $offset &
+ # failure to set is detected by reading mockup and toggling values
+ line_set_pid=$!
+ # allow for gpio-mockup-cdev to launch and request line
+ # (there is limited value in checking if line has been requested)
+ sleep 0.01
+ elif [ "$bias_opt" ]; then
+ cdev_opts=${cdev_opts}${bias_opt}
+ $BASE/gpio-mockup-cdev $cdev_opts /dev/$chip $offset || true
fi
- try_insert_module "gpio_mockup_ranges=$param"
- echo -n "GPIO $module test with ranges: <"
- echo "$param>: "
- printf "%-10s %s\n" $param
- test_chips $module $valid
- remove_module
}
-BASE=`dirname $0`
+assert_line()
+{
+ local val
+ # don't need any retry here as set_mock allows for propagation
+ val=$(get_line)
+ [ "$val" = "$1" ] || fail "line value is ${val:-empty} when $1 was expected"
+}
+
+# The following mockup helpers all make use of the $mock_line
+assert_mock()
+{
+ local backoff_wait=10
+ local retry=0
+ local val
+ # retry allows for set propagation from uAPI to mockup
+ while true; do
+ val=$(< $mock_line)
+ [ "$val" = "$1" ] && break
+ retry=$((retry + 1))
+ [ $retry -lt 5 ] || fail "mockup $mock_line value ${val:-empty} when $1 expected"
+ sleep $(printf "%0.2f" $((backoff_wait))e-3)
+ backoff_wait=$((backoff_wait * 2))
+ done
+}
+
+set_mock()
+{
+ echo "$1" > $mock_line
+ # allow for set propagation - so we won't be in a race with set_line
+ assert_mock "$1"
+}
-dev_type=
-TEMP=`getopt -o fhm:t: -n '$0' -- "$@"`
+# test the functionality of a line
+#
+# The line is set from the mockup side and is read from the userspace side
+# (input), and is set from the userspace side and is read from the mockup side
+# (output).
+#
+# Setting the mockup pull using the userspace interface bias settings is
+# tested where supported by the userspace interface (cdev).
+test_line()
+{
+ chip=$1
+ offset=$2
+ log "test_line $chip $offset"
+ mock_line=$GPIO_DEBUGFS/$chip/$offset
+ [ -e "$mock_line" ] || fail "missing line $chip:$offset"
-if [ "$?" != "0" ]; then
- echo "Parameter process failed, Terminating..." >&2
- exit 1
-fi
+ # test input active-high
+ set_mock 1
+ set_line input active-high
+ assert_line 1
+ set_mock 0
+ assert_line 0
+ set_mock 1
+ assert_line 1
+
+ if [ "$full_test" ]; then
+ if [ "$dev_type" != "sysfs" ]; then
+ # test pulls
+ set_mock 0
+ set_line input pull-up
+ assert_line 1
+ set_mock 0
+ assert_line 0
+
+ set_mock 1
+ set_line input pull-down
+ assert_line 0
+ set_mock 1
+ assert_line 1
+
+ set_line bias-none
+ fi
+
+ # test input active-low
+ set_mock 0
+ set_line active-low
+ assert_line 1
+ set_mock 1
+ assert_line 0
+ set_mock 0
+ assert_line 1
+
+ # test output active-high
+ set_mock 1
+ set_line active-high 0
+ assert_mock 0
+ set_line 1
+ assert_mock 1
+ set_line 0
+ assert_mock 0
+ fi
+
+ # test output active-low
+ set_mock 0
+ set_line active-low 0
+ assert_mock 1
+ set_line 1
+ assert_mock 0
+ set_line 0
+ assert_mock 1
+
+ release_line
+}
+
+test_no_line()
+{
+ log test_no_line "$*"
+ [ ! -e "$GPIO_DEBUGFS/$1/$2" ] || fail "unexpected line $1:$2"
+}
-# Note the quotes around `$TEMP': they are essential!
-eval set -- "$TEMP"
+# Load the module and check that the expected number of gpiochips, with the
+# expected number of lines, are created and are functional.
+#
+# $1 is the gpio_mockup_ranges parameter for the module
+# The remaining parameters are the number of lines, n, expected for each of
+# the gpiochips expected to be created.
+#
+# For each gpiochip the fence post lines, 0 and n-1, are tested, and the
+# line on the far side of the fence post, n, is tested to not exist.
+#
+# If the $random flag is set then a random line in the middle of the
+# gpiochip is tested as well.
+insmod_test()
+{
+ local ranges=
+ local gc=
+ local width=
-while true; do
- case $1 in
- -f)
+ [ "${1:-}" ] || fail "missing ranges"
+ ranges=$1 ; shift
+ try_insert_module "gpio_mockup_ranges=$ranges"
+ log "GPIO $module test with ranges: <$ranges>:"
+ # e.g. /sys/kernel/debug/gpio-mockup/gpiochip1
+ gpiochip=$(find "$DEBUGFS/$module/" -name gpiochip* -type d | sort)
+ for chip in $gpiochip; do
+ gc=${chip##*/}
+ [ "${1:-}" ] || fail "unexpected chip - $gc"
+ width=$1 ; shift
+ test_line $gc 0
+ if [ "$random" -a $width -gt 2 ]; then
+ test_line $gc $((RANDOM % ($width - 2) + 1))
+ fi
+ test_line $gc $(($width - 1))
+ test_no_line $gc $width
+ done
+ [ "${1:-}" ] && fail "missing expected chip of width $1"
+ remove_module || fail "failed to remove module with error $?"
+}
+
+while getopts ":frvt:" opt; do
+ case $opt in
+ f)
full_test=true
- shift
- ;;
- -h)
- usage
- exit
;;
- -m)
- module=$2
- shift 2
+ r)
+ random=true
;;
- -t)
- dev_type=$2
- shift 2
+ t)
+ dev_type=$OPTARG
;;
- --)
- shift
- break
+ v)
+ verbose=true
;;
*)
- echo "Internal error!"
- exit 1
+ usage
;;
esac
done
+shift $((OPTIND - 1))
-if [ X"$module" = X ]; then
- module="gpio-mockup"
-fi
-
-if [ X$dev_type != Xsysfs ]; then
- dev_type="chardev"
-fi
+[ "${1:-}" ] && fail "unknown argument '$1'"
prerequisite
-echo "1. Test dynamic allocation of gpio successful means insert gpiochip and"
-echo " manipulate gpio pin successful"
-gpio_test "-1,32" true
-gpio_test "-1,32,-1,32" true
-gpio_test "-1,32,-1,32,-1,32" true
-if [ X$full_test = Xtrue ]; then
- gpio_test "-1,32,32,64" true
- gpio_test "-1,32,40,64,-1,5" true
- gpio_test "-1,32,32,64,-1,32" true
- gpio_test "0,32,32,64,-1,32,-1,32" true
- gpio_test "-1,32,-1,32,0,32,32,64" true
- echo "2. Do basic test: successful means insert gpiochip and"
- echo " manipulate gpio pin successful"
- gpio_test "0,32" true
- gpio_test "0,32,32,64" true
- gpio_test "0,32,40,64,64,96" true
+trap 'exit $ksft_fail' SIGTERM SIGINT
+trap cleanup EXIT
+
+case "$dev_type" in
+sysfs)
+ source $BASE/gpio-mockup-sysfs.sh
+ echo "WARNING: gpio sysfs ABI is deprecated."
+ ;;
+cdev_v1)
+ echo "WARNING: gpio cdev ABI v1 is deprecated."
+ uapi_opt="-u1 "
+ ;;
+cdev)
+ ;;
+*)
+ fail "unknown interface type: $dev_type"
+ ;;
+esac
+
+remove_module || fail "can't remove existing $module module"
+
+# manual gpio allocation tests fail if a physical chip already exists
+[ "$full_test" -a -e "/dev/gpiochip0" ] && skip "full tests conflict with gpiochip0"
+
+echo "1. Module load tests"
+echo "1.1. dynamic allocation of gpio"
+insmod_test "-1,32" 32
+insmod_test "-1,23,-1,32" 23 32
+insmod_test "-1,23,-1,26,-1,32" 23 26 32
+if [ "$full_test" ]; then
+ echo "1.2. manual allocation of gpio"
+ insmod_test "0,32" 32
+ insmod_test "0,32,32,60" 32 28
+ insmod_test "0,32,40,64,64,96" 32 24 32
+ echo "1.3. dynamic and manual allocation of gpio"
+ insmod_test "-1,32,32,62" 32 30
+ insmod_test "-1,22,-1,23,0,24,32,64" 22 23 24 32
+ insmod_test "-1,32,32,60,-1,29" 32 28 29
+ insmod_test "-1,32,40,64,-1,5" 32 24 5
+ insmod_test "0,32,32,44,-1,22,-1,31" 32 12 22 31
fi
-echo "3. Error test: successful means insert gpiochip failed"
-echo "3.1 Test number of gpio overflow"
-#Currently: The max number of gpio(1024) is defined in arm architecture.
-gpio_test "-1,32,-1,1024" false
-if [ X$full_test = Xtrue ]; then
- echo "3.2 Test zero line of gpio"
- gpio_test "0,0" false
- echo "3.3 Test range overlap"
- echo "3.3.1 Test corner case"
- gpio_test "0,32,0,1" false
- gpio_test "0,32,32,64,32,40" false
- gpio_test "0,32,35,64,35,45" false
- gpio_test "0,32,31,32" false
- gpio_test "0,32,32,64,36,37" false
- gpio_test "0,32,35,64,34,36" false
- echo "3.3.2 Test inserting invalid second gpiochip"
- gpio_test "0,32,30,35" false
- gpio_test "0,32,1,5" false
- gpio_test "10,32,9,14" false
- gpio_test "10,32,30,35" false
- echo "3.3.3 Test others"
- gpio_test "0,32,40,56,39,45" false
- gpio_test "0,32,40,56,30,33" false
- gpio_test "0,32,40,56,30,41" false
- gpio_test "0,32,40,56,20,21" false
+echo "2. Module load error tests"
+echo "2.1 gpio overflow"
+# Currently the max number of GPIOs (1024) is defined in the ARM architecture.
+insmod_test "-1,1024"
+if [ "$full_test" ]; then
+ echo "2.2 no lines defined"
+ insmod_test "0,0"
+ echo "2.3 ignore range overlap"
+ insmod_test "0,32,0,1" 32
+ insmod_test "0,32,1,5" 32
+ insmod_test "0,32,30,35" 32
+ insmod_test "0,32,31,32" 32
+ insmod_test "10,32,30,35" 22
+ insmod_test "10,32,9,14" 22
+ insmod_test "0,32,20,21,40,56" 32 16
+ insmod_test "0,32,32,64,32,40" 32 32
+ insmod_test "0,32,32,64,36,37" 32 32
+ insmod_test "0,32,35,64,34,36" 32 29
+ insmod_test "0,30,35,64,35,45" 30 29
+ insmod_test "0,32,40,56,30,33" 32 16
+ insmod_test "0,32,40,56,30,41" 32 16
+ insmod_test "0,32,40,56,39,45" 32 16
fi
-echo GPIO test PASS
-
+echo "GPIO $module test PASS"
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index 5ec4d9e18806..656c43c24044 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -69,7 +69,7 @@ int restore_queue(struct msgque_data *msgque)
printf("msgsnd failed (%m)\n");
ret = -errno;
goto destroy;
- };
+ }
}
return 0;
@@ -180,7 +180,7 @@ int fill_msgque(struct msgque_data *msgque)
IPC_NOWAIT) != 0) {
printf("First message send failed (%m)\n");
return -errno;
- };
+ }
msgbuf.mtype = ANOTHER_MSG_TYPE;
memcpy(msgbuf.mtext, ANOTHER_TEST_STRING, sizeof(ANOTHER_TEST_STRING));
@@ -188,7 +188,7 @@ int fill_msgque(struct msgque_data *msgque)
IPC_NOWAIT) != 0) {
printf("Second message send failed (%m)\n");
return -errno;
- };
+ }
return 0;
}
diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
index bbc04646346b..00e60d6eb16b 100755
--- a/tools/testing/selftests/kselftest_deps.sh
+++ b/tools/testing/selftests/kselftest_deps.sh
@@ -129,13 +129,11 @@ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
# Level 3
-# gpio, memfd and others use pkg-config to find mount and fuse libs
+# memfd and others use pkg-config to find mount and fuse libs
# respectively and save it in VAR_LDLIBS. If pkg-config doesn't find
# any, VAR_LDLIBS set to default.
# Use the default value and filter out pkg-config for dependency check.
# e.g:
-# gpio/Makefile
-# VAR_LDLIBS := $(shell pkg-config --libs mount) 2>/dev/null)
# memfd/Makefile
# VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index edce85420d19..ae0f0f33b2a6 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -79,7 +79,7 @@
#endif
/**
- * TH_LOG(fmt, ...)
+ * TH_LOG()
*
* @fmt: format string
* @...: optional arguments
@@ -113,12 +113,16 @@
__FILE__, __LINE__, _metadata->name, ##__VA_ARGS__)
/**
- * SKIP(statement, fmt, ...)
+ * SKIP()
*
* @statement: statement to run after reporting SKIP
* @fmt: format string
* @...: optional arguments
*
+ * .. code-block:: c
+ *
+ * SKIP(statement, fmt, ...);
+ *
* This forces a "pass" after reporting why something is being skipped
* and runs "statement", which is usually "return" or "goto skip".
*/
@@ -136,7 +140,7 @@
} while (0)
/**
- * TEST(test_name) - Defines the test function and creates the registration
+ * TEST() - Defines the test function and creates the registration
* stub
*
* @test_name: test name
@@ -155,7 +159,7 @@
#define TEST(test_name) __TEST_IMPL(test_name, -1)
/**
- * TEST_SIGNAL(test_name, signal)
+ * TEST_SIGNAL()
*
* @test_name: test name
* @signal: signal number
@@ -195,7 +199,7 @@
struct __test_metadata __attribute__((unused)) *_metadata)
/**
- * FIXTURE_DATA(datatype_name) - Wraps the struct name so we have one less
+ * FIXTURE_DATA() - Wraps the struct name so we have one less
* argument to pass around
*
* @datatype_name: datatype name
@@ -212,7 +216,7 @@
#define FIXTURE_DATA(datatype_name) struct _test_data_##datatype_name
/**
- * FIXTURE(fixture_name) - Called once per fixture to setup the data and
+ * FIXTURE() - Called once per fixture to setup the data and
* register
*
* @fixture_name: fixture name
@@ -239,7 +243,7 @@
FIXTURE_DATA(fixture_name)
/**
- * FIXTURE_SETUP(fixture_name) - Prepares the setup function for the fixture.
+ * FIXTURE_SETUP() - Prepares the setup function for the fixture.
* *_metadata* is included so that EXPECT_* and ASSERT_* work correctly.
*
* @fixture_name: fixture name
@@ -265,7 +269,7 @@
__attribute__((unused)) *variant)
/**
- * FIXTURE_TEARDOWN(fixture_name)
+ * FIXTURE_TEARDOWN()
* *_metadata* is included so that EXPECT_* and ASSERT_* work correctly.
*
* @fixture_name: fixture name
@@ -286,7 +290,7 @@
FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
/**
- * FIXTURE_VARIANT(fixture_name) - Optionally called once per fixture
+ * FIXTURE_VARIANT() - Optionally called once per fixture
* to declare fixture variant
*
* @fixture_name: fixture name
@@ -305,7 +309,7 @@
#define FIXTURE_VARIANT(fixture_name) struct _fixture_variant_##fixture_name
/**
- * FIXTURE_VARIANT_ADD(fixture_name, variant_name) - Called once per fixture
+ * FIXTURE_VARIANT_ADD() - Called once per fixture
* variant to setup and register the data
*
* @fixture_name: fixture name
@@ -339,7 +343,7 @@
_##fixture_name##_##variant_name##_variant =
/**
- * TEST_F(fixture_name, test_name) - Emits test registration and helpers for
+ * TEST_F() - Emits test registration and helpers for
* fixture-based test cases
*
* @fixture_name: fixture name
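These kernel-doc tweaks only change how the macros are rendered; their use is unchanged. For readers new to the harness, a minimal fixture-based test looks roughly like this (a sketch, not code from this patch):

#include "kselftest_harness.h"

FIXTURE(counter) {
        int value;
};

FIXTURE_SETUP(counter)
{
        self->value = 41;
}

FIXTURE_TEARDOWN(counter)
{
        /* nothing to release in this sketch */
}

TEST_F(counter, increments)
{
        self->value++;
        ASSERT_EQ(42, self->value);
}

TEST_HARNESS_MAIN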
diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h
index e8eafaf0941a..e2ea41de3f35 100644
--- a/tools/testing/selftests/kselftest_module.h
+++ b/tools/testing/selftests/kselftest_module.h
@@ -11,7 +11,8 @@
#define KSTM_MODULE_GLOBALS() \
static unsigned int total_tests __initdata; \
-static unsigned int failed_tests __initdata
+static unsigned int failed_tests __initdata; \
+static unsigned int skipped_tests __initdata
#define KSTM_CHECK_ZERO(x) do { \
total_tests++; \
@@ -21,11 +22,16 @@ static unsigned int failed_tests __initdata
} \
} while (0)
-static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests)
+static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests,
+ unsigned int skipped_tests)
{
- if (failed_tests == 0)
- pr_info("all %u tests passed\n", total_tests);
- else
+ if (failed_tests == 0) {
+ if (skipped_tests) {
+ pr_info("skipped %u tests\n", skipped_tests);
+ pr_info("remaining %u tests passed\n", total_tests);
+ } else
+ pr_info("all %u tests passed\n", total_tests);
+ } else
pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);
return failed_tests ? -EINVAL : 0;
@@ -36,7 +42,7 @@ static int __init __module##_init(void) \
{ \
pr_info("loaded.\n"); \
selftest(); \
- return kstm_report(total_tests, failed_tests); \
+ return kstm_report(total_tests, failed_tests, skipped_tests); \
} \
static void __exit __module##_exit(void) \
{ \
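With the extra counter, kstm_report() can distinguish skipped checks from failures. A minimal sketch of a kernel test module built on this header follows; the KSTM_MODULE_LOADERS() helper is the one whose expansion appears in the hunk above, and incrementing skipped_tests directly is an assumption about how callers record skips:

// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "test_example: " fmt
#include <linux/module.h>
#include <linux/printk.h>
#include "../tools/testing/selftests/kselftest_module.h"

KSTM_MODULE_GLOBALS();

static void __init selftest(void)
{
        KSTM_CHECK_ZERO(1 + 1 - 2);          /* counted as a pass */

        if (!IS_ENABLED(CONFIG_64BIT))
                skipped_tests++;             /* reported separately */
}

KSTM_MODULE_LOADERS(test_example);
MODULE_LICENSE("GPL");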
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index ce8f4ad39684..3a84394829ea 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -7,6 +7,7 @@
/x86_64/cr4_cpuid_sync_test
/x86_64/debug_regs
/x86_64/evmcs_test
+/x86_64/get_cpuid_test
/x86_64/kvm_pv_test
/x86_64/hyperv_cpuid
/x86_64/mmio_warning_test
@@ -24,10 +25,15 @@
/x86_64/vmx_preemption_timer_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
+/x86_64/xapic_ipi_test
+/x86_64/xen_shinfo_test
+/x86_64/xen_vmcall_test
/x86_64/xss_msr_test
+/x86_64/vmx_pmu_msrs_test
/demand_paging_test
/dirty_log_test
/dirty_log_perf_test
/kvm_create_max_vcpus
+/memslot_modification_stress_test
/set_memory_region_test
/steal_time
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index fe41c6a0fa67..8c8eda429576 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -40,6 +40,7 @@ LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_ha
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
+TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
@@ -56,13 +57,18 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
+TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
+TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index cdad1eca72f7..5f7a229c3af1 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -64,7 +64,7 @@ static void *vcpu_worker(void *data)
exit_reason_str(run->exit_reason));
}
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -95,7 +95,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
return r;
}
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
timespec_to_ns(ts_diff));
@@ -190,7 +190,7 @@ static void *uffd_handler_thread_fn(void *arg)
pages++;
}
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
pages, ts_diff.tv_sec, ts_diff.tv_nsec,
pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
@@ -250,6 +250,7 @@ static int setup_demand_paging(struct kvm_vm *vm,
struct test_params {
bool use_uffd;
useconds_t uffd_delay;
+ bool partition_vcpu_memory_access;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -265,7 +266,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
int vcpu_id;
int r;
- vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+ vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+ VM_MEM_SRC_ANONYMOUS);
perf_test_args.wr_fract = 1;
@@ -277,7 +279,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
- perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
+ p->partition_vcpu_memory_access);
if (p->use_uffd) {
uffd_handler_threads =
@@ -293,10 +296,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vm_paddr_t vcpu_gpa;
void *vcpu_hva;
+ uint64_t vcpu_mem_size;
- vcpu_gpa = guest_test_phys_mem + (vcpu_id * guest_percpu_mem_size);
+
+ if (p->partition_vcpu_memory_access) {
+ vcpu_gpa = guest_test_phys_mem +
+ (vcpu_id * guest_percpu_mem_size);
+ vcpu_mem_size = guest_percpu_mem_size;
+ } else {
+ vcpu_gpa = guest_test_phys_mem;
+ vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
+ }
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
- vcpu_id, vcpu_gpa, vcpu_gpa + guest_percpu_mem_size);
+ vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
/* Cache the HVA pointer of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
@@ -313,7 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
&uffd_handler_threads[vcpu_id],
pipefds[vcpu_id * 2],
p->uffd_delay, &uffd_args[vcpu_id],
- vcpu_hva, guest_percpu_mem_size);
+ vcpu_hva, vcpu_mem_size);
if (r < 0)
exit(-r);
}
@@ -339,7 +351,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
}
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
@@ -376,7 +388,7 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
- " [-b memory] [-v vcpus]\n", name);
+ " [-b memory] [-v vcpus] [-o]\n", name);
guest_modes_help();
printf(" -u: use User Fault FD to handle vCPU page\n"
" faults.\n");
@@ -387,6 +399,8 @@ static void help(char *name)
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
puts("");
exit(0);
}
@@ -394,12 +408,14 @@ static void help(char *name)
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
- struct test_params p = {};
+ struct test_params p = {
+ .partition_vcpu_memory_access = true,
+ };
int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -419,6 +435,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
+ case 'o':
+ p.partition_vcpu_memory_access = false;
+ break;
case 'h':
default:
help(argv[0]);
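The new -o option only changes how the guest memory region is split between vCPUs; the demand-paging machinery itself (enabled with -u) still relies on userfaultfd. For readers unfamiliar with that API, a condensed sketch of the registration step, mirroring the standard userfaultfd(2) flow rather than quoting the test, with feature negotiation and the event-handler loop omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Register [addr, addr + len) for missing-page events; returns the uffd. */
static int register_uffd(void *addr, unsigned long len)
{
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)addr, .len = len },
                .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (uffd < 0)
                return -1;
        if (ioctl(uffd, UFFDIO_API, &api) || ioctl(uffd, UFFDIO_REGISTER, &reg)) {
                close(uffd);
                return -1;
        }
        /* A handler thread then reads struct uffd_msg events from uffd
         * and resolves them with UFFDIO_COPY or UFFDIO_ZEROPAGE.
         */
        return uffd;
}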
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 2283a0ec74a9..04a2641261be 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -28,8 +28,8 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
-static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+static int iteration;
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void *vcpu_worker(void *data)
{
@@ -48,11 +48,11 @@ static void *vcpu_worker(void *data)
run = vcpu_state(vm, vcpu_id);
while (!READ_ONCE(host_quit)) {
- uint64_t current_iteration = READ_ONCE(iteration);
+ int current_iteration = READ_ONCE(iteration);
clock_gettime(CLOCK_MONOTONIC, &start);
ret = _vcpu_run(vm, vcpu_id);
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
@@ -61,17 +61,17 @@ static void *vcpu_worker(void *data)
pr_debug("Got sync event from vCPU %d\n", vcpu_id);
vcpu_last_completed_iteration[vcpu_id] = current_iteration;
- pr_debug("vCPU %d updated last completed iteration to %lu\n",
+ pr_debug("vCPU %d updated last completed iteration to %d\n",
vcpu_id, vcpu_last_completed_iteration[vcpu_id]);
if (current_iteration) {
pages_count += vcpu_args->pages;
total = timespec_add(total, ts_diff);
- pr_debug("vCPU %d iteration %lu dirty memory time: %ld.%.9lds\n",
+ pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
vcpu_id, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
} else {
- pr_debug("vCPU %d iteration %lu populate memory time: %ld.%.9lds\n",
+ pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
vcpu_id, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
}
@@ -81,7 +81,7 @@ static void *vcpu_worker(void *data)
}
avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
- pr_debug("\nvCPU %d dirtied 0x%lx pages over %lu iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
+ pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
@@ -92,6 +92,8 @@ struct test_params {
unsigned long iterations;
uint64_t phys_offset;
int wr_fract;
+ bool partition_vcpu_memory_access;
+ enum vm_mem_backing_src_type backing_src;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -111,7 +113,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_enable_cap cap = {};
struct timespec clear_dirty_log_total = (struct timespec){0};
- vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+ vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+ p->backing_src);
perf_test_args.wr_fract = p->wr_fract;
@@ -129,7 +132,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
- perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
+ p->partition_vcpu_memory_access);
sync_global_to_guest(vm, perf_test_args);
@@ -139,17 +143,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clock_gettime(CLOCK_MONOTONIC, &start);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ vcpu_last_completed_iteration[vcpu_id] = -1;
+
pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
&perf_test_args.vcpu_args[vcpu_id]);
}
- /* Allow the vCPU to populate memory */
- pr_debug("Starting iteration %lu - Populating\n", iteration);
- while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
- pr_debug("Waiting for vcpu_last_completed_iteration == %lu\n",
- iteration);
+ /* Allow the vCPUs to populate memory */
+ pr_debug("Starting iteration %d - Populating\n", iteration);
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+ iteration)
+ ;
+ }
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
pr_info("Populate memory time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -157,7 +165,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clock_gettime(CLOCK_MONOTONIC, &start);
vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX,
KVM_MEM_LOG_DIRTY_PAGES);
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -169,25 +177,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clock_gettime(CLOCK_MONOTONIC, &start);
iteration++;
- pr_debug("Starting iteration %lu\n", iteration);
+ pr_debug("Starting iteration %d\n", iteration);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
- pr_debug("Waiting for vCPU %d vcpu_last_completed_iteration == %lu\n",
- vcpu_id, iteration);
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+ != iteration)
+ ;
}
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
- pr_info("Iteration %lu dirty memory time: %ld.%.9lds\n",
+ pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
clock_gettime(CLOCK_MONOTONIC, &start);
kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap);
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
ts_diff);
- pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n",
+ pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
if (dirty_log_manual_caps) {
@@ -195,26 +203,26 @@ static void run_test(enum vm_guest_mode mode, void *arg)
kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0,
host_num_pages);
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
clear_dirty_log_total = timespec_add(clear_dirty_log_total,
ts_diff);
- pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n",
+ pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
}
}
- /* Tell the vcpu thread to quit */
- host_quit = true;
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
- pthread_join(vcpu_threads[vcpu_id], NULL);
-
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0);
- ts_diff = timespec_diff_now(start);
+ ts_diff = timespec_elapsed(start);
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
+ /* Tell the vcpu thread to quit */
+ host_quit = true;
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
+ pthread_join(vcpu_threads[vcpu_id], NULL);
+
avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
p->iterations, get_dirty_log_total.tv_sec,
@@ -236,7 +244,7 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-i iterations] [-p offset] "
- "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
+ "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]\n", name);
puts("");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
@@ -251,6 +259,11 @@ static void help(char *name)
" 1/<fraction of pages to write>.\n"
" (default: 1 i.e. all pages are written to.)\n");
printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
+ printf(" -s: specify the type of memory that should be used to\n"
+ " back the guest data region.\n\n");
+ backing_src_help();
puts("");
exit(0);
}
@@ -261,6 +274,8 @@ int main(int argc, char *argv[])
struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
.wr_fract = 1,
+ .partition_vcpu_memory_access = true,
+ .backing_src = VM_MEM_SRC_ANONYMOUS,
};
int opt;
@@ -271,10 +286,10 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
+ while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:os:")) != -1) {
switch (opt) {
case 'i':
- p.iterations = strtol(optarg, NULL, 10);
+ p.iterations = atoi(optarg);
break;
case 'p':
p.phys_offset = strtoull(optarg, NULL, 0);
@@ -295,6 +310,11 @@ int main(int argc, char *argv[])
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
+ case 'o':
+ p.partition_vcpu_memory_access = false;
+ break;
+ case 's':
+ p.backing_src = parse_backing_src_type(optarg);
+ break;
case 'h':
default:
help(argv[0]);
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 5cbb861525ed..2d7eb6989e83 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -79,12 +79,6 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
-enum vm_mem_backing_src_type {
- VM_MEM_SRC_ANONYMOUS,
- VM_MEM_SRC_ANONYMOUS_THP,
- VM_MEM_SRC_ANONYMOUS_HUGETLB,
-};
-
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
diff --git a/tools/testing/selftests/kvm/include/numaif.h b/tools/testing/selftests/kvm/include/numaif.h
new file mode 100644
index 000000000000..b020547403fd
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/numaif.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/numaif.h
+ *
+ * Copyright (C) 2020, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Header file that provides access to NUMA API functions not explicitly
+ * exported to user space.
+ */
+
+#ifndef SELFTEST_KVM_NUMAIF_H
+#define SELFTEST_KVM_NUMAIF_H
+
+#define __NR_get_mempolicy 239
+#define __NR_migrate_pages 256
+
+/* System calls */
+long get_mempolicy(int *policy, const unsigned long *nmask,
+ unsigned long maxnode, void *addr, int flags)
+{
+ return syscall(__NR_get_mempolicy, policy, nmask,
+ maxnode, addr, flags);
+}
+
+long migrate_pages(int pid, unsigned long maxnode,
+ const unsigned long *frommask,
+ const unsigned long *tomask)
+{
+ return syscall(__NR_migrate_pages, pid, maxnode, frommask, tomask);
+}
+
+/* Policies */
+#define MPOL_DEFAULT 0
+#define MPOL_PREFERRED 1
+#define MPOL_BIND 2
+#define MPOL_INTERLEAVE 3
+
+#define MPOL_MAX MPOL_INTERLEAVE
+
+/* Flags for get_mem_policy */
+#define MPOL_F_NODE (1<<0) /* return next il node or node of address */
+ /* Warning: MPOL_F_NODE is unsupported and
+ * subject to change. Don't use.
+ */
+#define MPOL_F_ADDR (1<<1) /* look up vma using address */
+#define MPOL_F_MEMS_ALLOWED (1<<2) /* query nodes allowed in cpuset */
+
+/* Flags for mbind */
+#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
+#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
+
+#endif /* SELFTEST_KVM_NUMAIF_H */
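The syscall numbers hard-coded here (239 and 256) are the x86_64 ones, which matches the x86_64-only tests that include this header. A minimal sketch of querying the NUMA node backing a page with the get_mempolicy() wrapper, assuming the file is built within the selftests so that "numaif.h" resolves:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "numaif.h"

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        void *page;
        int node = -1;

        if (posix_memalign(&page, page_size, page_size))
                return 1;
        memset(page, 0, page_size);     /* fault the page in */

        /* MPOL_F_NODE | MPOL_F_ADDR returns the node backing 'page' */
        if (get_mempolicy(&node, NULL, 0, page, MPOL_F_NODE | MPOL_F_ADDR))
                return 1;
        printf("page resides on node %d\n", node);
        free(page);
        return 0;
}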
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index b1188823c31b..005f2143adeb 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -44,8 +44,11 @@ extern struct perf_test_args perf_test_args;
extern uint64_t guest_test_phys_mem;
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
- uint64_t vcpu_memory_bytes);
+ uint64_t vcpu_memory_bytes,
+ enum vm_mem_backing_src_type backing_src);
void perf_test_destroy_vm(struct kvm_vm *vm);
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+ uint64_t vcpu_memory_bytes,
+ bool partition_vcpu_memory_access);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
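The new parameters thread the backing source and the partition/overlap choice through the common setup code; call sites now follow the pattern below (condensed from the demand_paging_test.c and dirty_log_perf_test.c hunks above, so the wrapper name is only for illustration):

#include "kvm_util.h"
#include "perf_test_util.h"
#include "test_util.h"

/* Sketch of the shared setup sequence used by the perf-style tests. */
static struct kvm_vm *setup_perf_vm(enum vm_guest_mode mode, int nr_vcpus,
                                    uint64_t vcpu_bytes, bool partition,
                                    enum vm_mem_backing_src_type src)
{
        struct kvm_vm *vm;

        vm = perf_test_create_vm(mode, nr_vcpus, vcpu_bytes, src);
        perf_test_args.wr_fract = 1;
        perf_test_setup_vcpus(vm, nr_vcpus, vcpu_bytes, partition);
        sync_global_to_guest(vm, perf_test_args);
        return vm;
}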
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index ffffa560436b..b7f41399f22c 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -64,7 +64,21 @@ int64_t timespec_to_ns(struct timespec ts);
struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
-struct timespec timespec_diff_now(struct timespec start);
+struct timespec timespec_elapsed(struct timespec start);
struct timespec timespec_div(struct timespec ts, int divisor);
+enum vm_mem_backing_src_type {
+ VM_MEM_SRC_ANONYMOUS,
+ VM_MEM_SRC_ANONYMOUS_THP,
+ VM_MEM_SRC_ANONYMOUS_HUGETLB,
+};
+
+struct vm_mem_backing_src_alias {
+ const char *name;
+ enum vm_mem_backing_src_type type;
+};
+
+void backing_src_help(void);
+enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 90cd5984751b..0b30b4e15c38 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -263,6 +263,19 @@ static inline void outl(uint16_t port, uint32_t value)
__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}
+static inline void cpuid(uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ /* ecx is often an input as well as an output. */
+ asm volatile("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx)
+ : "memory");
+}
+
#define SET_XMM(__var, __xmm) \
asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)
@@ -338,8 +351,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_x86_state *state);
struct kvm_msr_list *kvm_get_msr_index_list(void);
-
+uint64_t kvm_get_feature_msr(uint64_t msr_index);
struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
+
+struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_cpuid2 *cpuid);
@@ -391,6 +406,10 @@ bool set_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *ent);
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3);
+struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+
/*
* Basic CPU control in CR0
*/
@@ -406,8 +425,27 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
#define X86_CR0_CD (1UL<<30) /* Cache Disable */
#define X86_CR0_PG (1UL<<31) /* Paging */
+#define APIC_DEFAULT_GPA 0xfee00000ULL
+
+/* APIC base address MSR and fields */
+#define MSR_IA32_APICBASE 0x0000001b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_EXTD (1<<10)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define GET_APIC_BASE(x) (((x) >> 12) << 12)
+
#define APIC_BASE_MSR 0x800
#define X2APIC_ENABLE (1UL << 10)
+#define APIC_ID 0x20
+#define APIC_LVR 0x30
+#define GET_APIC_ID_FIELD(x) (((x) >> 24) & 0xFF)
+#define APIC_TASKPRI 0x80
+#define APIC_PROCPRI 0xA0
+#define APIC_EOI 0xB0
+#define APIC_SPIV 0xF0
+#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
+#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
@@ -432,6 +470,7 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
+#define SET_APIC_DEST_FIELD(x) ((x) << 24)
/* VMX_EPT_VPID_CAP bits */
#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21)
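The cpuid() wrapper added above follows the usual inline-asm pattern, with ECX doubling as the sub-leaf input. As a quick illustration of guest-side use, probing the x2APIC feature bit from leaf 1 (standard CPUID semantics; not code from this patch, and the include path assumes the selftests' x86_64 include directory):

#include <stdbool.h>
#include "processor.h"

/* Guest-side sketch: does the (virtual) CPU advertise x2APIC? */
static bool guest_has_x2apic(void)
{
        uint32_t eax = 1, ebx, ecx = 0, edx;

        cpuid(&eax, &ebx, &ecx, &edx);
        return ecx & (1u << 21);        /* CPUID.01H:ECX bit 21 = x2APIC */
}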
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index fa5a90e6c6f0..d787cb802b4a 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1801,6 +1801,7 @@ static struct exit_reason {
{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
{KVM_EXIT_X86_RDMSR, "RDMSR"},
{KVM_EXIT_X86_WRMSR, "WRMSR"},
+ {KVM_EXIT_XEN, "XEN"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 9be1944c2d1c..81490b9b4e32 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -49,7 +49,8 @@ static void guest_code(uint32_t vcpu_id)
}
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
- uint64_t vcpu_memory_bytes)
+ uint64_t vcpu_memory_bytes,
+ enum vm_mem_backing_src_type backing_src)
{
struct kvm_vm *vm;
uint64_t guest_num_pages;
@@ -93,8 +94,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
/* Add an extra memory slot for testing */
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- guest_test_phys_mem,
+ vm_userspace_mem_region_add(vm, backing_src, guest_test_phys_mem,
PERF_TEST_MEM_SLOT_INDEX,
guest_num_pages, 0);
@@ -112,7 +112,9 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
kvm_vm_free(vm);
}
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+ uint64_t vcpu_memory_bytes,
+ bool partition_vcpu_memory_access)
{
vm_paddr_t vcpu_gpa;
struct perf_test_vcpu_args *vcpu_args;
@@ -122,13 +124,22 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_by
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
vcpu_args->vcpu_id = vcpu_id;
- vcpu_args->gva = guest_test_virt_mem +
- (vcpu_id * vcpu_memory_bytes);
- vcpu_args->pages = vcpu_memory_bytes /
- perf_test_args.guest_page_size;
+ if (partition_vcpu_memory_access) {
+ vcpu_args->gva = guest_test_virt_mem +
+ (vcpu_id * vcpu_memory_bytes);
+ vcpu_args->pages = vcpu_memory_bytes /
+ perf_test_args.guest_page_size;
+ vcpu_gpa = guest_test_phys_mem +
+ (vcpu_id * vcpu_memory_bytes);
+ } else {
+ vcpu_args->gva = guest_test_virt_mem;
+ vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+ perf_test_args.guest_page_size;
+ vcpu_gpa = guest_test_phys_mem;
+ }
- vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
- vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+ vcpu_id, vcpu_gpa, vcpu_gpa +
+ (vcpu_args->pages * perf_test_args.guest_page_size));
}
}
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 8e04c0b1608e..906c955384e2 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -10,6 +10,7 @@
#include <limits.h>
#include <stdlib.h>
#include <time.h>
+#include "linux/kernel.h"
#include "test_util.h"
@@ -84,7 +85,7 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
return timespec_add_ns((struct timespec){0}, ns1 - ns2);
}
-struct timespec timespec_diff_now(struct timespec start)
+struct timespec timespec_elapsed(struct timespec start)
{
struct timespec end;
@@ -109,3 +110,31 @@ void print_skip(const char *fmt, ...)
va_end(ap);
puts(", skipping test");
}
+
+const struct vm_mem_backing_src_alias backing_src_aliases[] = {
+ {"anonymous", VM_MEM_SRC_ANONYMOUS,},
+ {"anonymous_thp", VM_MEM_SRC_ANONYMOUS_THP,},
+ {"anonymous_hugetlb", VM_MEM_SRC_ANONYMOUS_HUGETLB,},
+};
+
+void backing_src_help(void)
+{
+ int i;
+
+ printf("Available backing src types:\n");
+ for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++)
+ printf("\t%s\n", backing_src_aliases[i].name);
+}
+
+enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++)
+ if (!strcmp(type_name, backing_src_aliases[i].name))
+ return backing_src_aliases[i].type;
+
+ backing_src_help();
+ TEST_FAIL("Unknown backing src type: %s", type_name);
+ return -1;
+}
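Tests hook these helpers into their option parsing; the fragment below condenses the pattern used by dirty_log_perf_test.c above (the surrounding getopt loop is elided):

#include "test_util.h"

static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS;

/* Sketch: handle the -s and -h options of a perf-style selftest. */
static void handle_backing_src_opt(int opt, const char *arg)
{
        switch (opt) {
        case 's':
                backing_src = parse_backing_src_type(arg);
                break;
        case 'h':
        default:
                backing_src_help();     /* anonymous, anonymous_thp, ... */
                break;
        }
}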
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 95e1a757c629..de0c76177d02 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -670,6 +670,82 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
}
/*
+ * KVM Get MSR
+ *
+ * Input Args:
+ * msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t kvm_get_feature_msr(uint64_t msr_index)
+{
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r, kvm_fd;
+
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+ if (kvm_fd < 0)
+ exit(KSFT_SKIP);
+
+ r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+
+ close(kvm_fd);
+ return buffer.entry.data;
+}
+
+/*
+ * VM VCPU CPUID Get
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU id
+ *
+ * Output Args: None
+ *
+ * Return: KVM CPUID (KVM_GET_CPUID2)
+ *
+ * Set the VCPU's CPUID.
+ */
+struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct kvm_cpuid2 *cpuid;
+ int rc, max_ent;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ cpuid = allocate_kvm_cpuid2();
+ max_ent = cpuid->nent;
+
+ for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
+ rc = ioctl(vcpu->fd, KVM_GET_CPUID2, cpuid);
+ if (!rc)
+ break;
+
+ TEST_ASSERT(rc == -1 && errno == E2BIG,
+ "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d",
+ rc, errno);
+ }
+
+ TEST_ASSERT(rc == 0, "KVM_GET_CPUID2 failed, rc: %i errno: %i",
+ rc, errno);
+
+ return cpuid;
+}
+
+
+
+/*
* Locate a cpuid entry.
*
* Input Args:
@@ -1224,3 +1300,71 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
: "b"(a0), "c"(a1), "d"(a2), "S"(a3));
return r;
}
+
+struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
+{
+ static struct kvm_cpuid2 *cpuid;
+ int ret;
+ int kvm_fd;
+
+ if (cpuid)
+ return cpuid;
+
+ cpuid = allocate_kvm_cpuid2();
+ kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+ if (kvm_fd < 0)
+ exit(KSFT_SKIP);
+
+ ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
+ ret, errno);
+
+ close(kvm_fd);
+ return cpuid;
+}
+
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ static struct kvm_cpuid2 *cpuid_full;
+ struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+ int i, nent = 0;
+
+ if (!cpuid_full) {
+ cpuid_sys = kvm_get_supported_cpuid();
+ cpuid_hv = kvm_get_supported_hv_cpuid();
+
+ cpuid_full = malloc(sizeof(*cpuid_full) +
+ (cpuid_sys->nent + cpuid_hv->nent) *
+ sizeof(struct kvm_cpuid_entry2));
+ if (!cpuid_full) {
+ perror("malloc");
+ abort();
+ }
+
+ /* Need to skip KVM CPUID leaves 0x400000xx */
+ for (i = 0; i < cpuid_sys->nent; i++) {
+ if (cpuid_sys->entries[i].function >= 0x40000000 &&
+ cpuid_sys->entries[i].function < 0x40000100)
+ continue;
+ cpuid_full->entries[nent] = cpuid_sys->entries[i];
+ nent++;
+ }
+
+ memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+ cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+ cpuid_full->nent = nent + cpuid_hv->nent;
+ }
+
+ vcpu_set_cpuid(vm, vcpuid, cpuid_full);
+}
+
+struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ static struct kvm_cpuid2 *cpuid;
+
+ cpuid = allocate_kvm_cpuid2();
+
+ vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+ return cpuid;
+}
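Together these helpers let a test install a CPUID, read back what KVM actually reports, and populate the Hyper-V leaves; a rough usage sketch with VM creation and the vCPU id (VCPU_ID) supplied elsewhere (the equality assert is illustrative, not a claim about KVM's behaviour):

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

#define VCPU_ID 0

/* Sketch: set the supported CPUID and read back what the vCPU reports. */
static void check_cpuid(struct kvm_vm *vm)
{
        struct kvm_cpuid2 *supported = kvm_get_supported_cpuid();
        struct kvm_cpuid2 *reported;

        vcpu_set_cpuid(vm, VCPU_ID, supported);
        reported = vcpu_get_cpuid(vm, VCPU_ID);
        TEST_ASSERT(reported->nent == supported->nent,
                    "vCPU reports %u CPUID entries, expected %u",
                    reported->nent, supported->nent);

        /* Hyper-V enlightened guests additionally need the 0x4000xxxx leaves */
        vcpu_set_hv_cpuid(vm, VCPU_ID);
}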
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 3a5c72ed2b79..827fe6028dd4 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -74,7 +74,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);
memset(vmcb, 0, sizeof(*vmcb));
- asm volatile ("vmsave\n\t" : : "a" (vmcb_gpa) : "memory");
+ asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
@@ -131,19 +131,19 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
{
asm volatile (
- "vmload\n\t"
+ "vmload %[vmcb_gpa]\n\t"
"mov rflags, %%r15\n\t" // rflags
"mov %%r15, 0x170(%[vmcb])\n\t"
"mov guest_regs, %%r15\n\t" // rax
"mov %%r15, 0x1f8(%[vmcb])\n\t"
LOAD_GPR_C
- "vmrun\n\t"
+ "vmrun %[vmcb_gpa]\n\t"
SAVE_GPR_C
"mov 0x170(%[vmcb]), %%r15\n\t" // rflags
"mov %%r15, rflags\n\t"
"mov 0x1f8(%[vmcb]), %%r15\n\t" // rax
"mov %%r15, guest_regs\n\t"
- "vmsave\n\t"
+ "vmsave %[vmcb_gpa]\n\t"
: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
: "r15", "memory");
}
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
new file mode 100644
index 000000000000..6096bf0a5b34
--- /dev/null
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM memslot modification stress test
+ * Adapted from demand_paging_test.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2020, Google, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <asm/unistd.h>
+#include <time.h>
+#include <poll.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/userfaultfd.h>
+
+#include "perf_test_util.h"
+#include "processor.h"
+#include "test_util.h"
+#include "guest_modes.h"
+
+#define DUMMY_MEMSLOT_INDEX 7
+
+#define DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS 10
+
+
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
+static bool run_vcpus = true;
+
+static void *vcpu_worker(void *data)
+{
+ int ret;
+ struct perf_test_vcpu_args *vcpu_args =
+ (struct perf_test_vcpu_args *)data;
+ int vcpu_id = vcpu_args->vcpu_id;
+ struct kvm_vm *vm = perf_test_args.vm;
+ struct kvm_run *run;
+
+ vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+ run = vcpu_state(vm, vcpu_id);
+
+ /* Let the guest access its memory until a stop signal is received */
+ while (READ_ONCE(run_vcpus)) {
+ ret = _vcpu_run(vm, vcpu_id);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+
+ if (get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC)
+ continue;
+
+ TEST_ASSERT(false,
+ "Invalid guest sync status: exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
+ }
+
+ return NULL;
+}
+
+struct memslot_antagonist_args {
+ struct kvm_vm *vm;
+ useconds_t delay;
+ uint64_t nr_modifications;
+};
+
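+/*
+ * Add and then immediately delete a one-page dummy memslot at @gpa,
+ * nr_modifications times, so KVM has to repeatedly update its memslots
+ * while the vCPU workers are accessing guest memory.
+ */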
+static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
+ uint64_t nr_modifications, uint64_t gpa)
+{
+ int i;
+
+ for (i = 0; i < nr_modifications; i++) {
+ usleep(delay);
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
+ DUMMY_MEMSLOT_INDEX, 1, 0);
+
+ vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
+ }
+}
+
+struct test_params {
+ useconds_t memslot_modification_delay;
+ uint64_t nr_memslot_modifications;
+ bool partition_vcpu_memory_access;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+ struct test_params *p = arg;
+ pthread_t *vcpu_threads;
+ struct kvm_vm *vm;
+ int vcpu_id;
+
+ vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+ VM_MEM_SRC_ANONYMOUS);
+
+ perf_test_args.wr_fract = 1;
+
+ vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
+ TEST_ASSERT(vcpu_threads, "Memory allocation failed");
+
+ perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
+ p->partition_vcpu_memory_access);
+
+ /* Export the shared variables to the guest */
+ sync_global_to_guest(vm, perf_test_args);
+
+ pr_info("Finished creating vCPUs\n");
+
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
+ pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
+ &perf_test_args.vcpu_args[vcpu_id]);
+
+ pr_info("Started all vCPUs\n");
+
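+ /*
+ * Place the dummy slot just past the memory the vCPUs are touching so
+ * the add/delete cycle never overlaps their working set.
+ */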
+ add_remove_memslot(vm, p->memslot_modification_delay,
+ p->nr_memslot_modifications,
+ guest_test_phys_mem +
+ (guest_percpu_mem_size * nr_vcpus) +
+ perf_test_args.host_page_size +
+ perf_test_args.guest_page_size);
+
+ run_vcpus = false;
+
+ /* Wait for the vcpu threads to quit */
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
+ pthread_join(vcpu_threads[vcpu_id], NULL);
+
+ pr_info("All vCPU threads joined\n");
+
+ ucall_uninit(vm);
+ kvm_vm_free(vm);
+
+ free(vcpu_threads);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-m mode] [-d delay_usec]\n"
+ " [-b memory] [-v vcpus] [-o] [-i iterations]\n", name);
+ guest_modes_help();
+ printf(" -d: add a delay, in usec, between each iteration of\n"
+ "     adding and deleting a memslot.\n");
+ printf(" -b: specify the size of the memory region which should be\n"
+ " accessed by each vCPU. e.g. 10M or 3G.\n"
+ " Default: 1G\n");
+ printf(" -v: specify the number of vCPUs to run.\n");
+ printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+ " them into a separate region of memory for each vCPU.\n");
+ printf(" -i: specify the number of iterations of adding and removing\n"
+ " a memslot.\n"
+ " Default: %d\n", DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS);
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ int opt;
+ struct test_params p = {
+ .memslot_modification_delay = 0,
+ .nr_memslot_modifications =
+ DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS,
+ .partition_vcpu_memory_access = true
+ };
+
+ guest_modes_append_default();
+
+ while ((opt = getopt(argc, argv, "hm:d:b:v:oi:")) != -1) {
+ switch (opt) {
+ case 'm':
+ guest_modes_cmdline(optarg);
+ break;
+ case 'd':
+ p.memslot_modification_delay = strtoul(optarg, NULL, 0);
+ TEST_ASSERT(p.memslot_modification_delay >= 0,
+ "A negative delay is not supported.");
+ break;
+ case 'b':
+ guest_percpu_mem_size = parse_size(optarg);
+ break;
+ case 'v':
+ nr_vcpus = atoi(optarg);
+ TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+ "Invalid number of vcpus, must be between 1 and %d",
+ max_vcpus);
+ break;
+ case 'o':
+ p.partition_vcpu_memory_access = false;
+ break;
+ case 'i':
+ p.nr_memslot_modifications = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ for_each_guest_mode(run_test, &p);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/settings b/tools/testing/selftests/kvm/settings
new file mode 100644
index 000000000000..6091b45d226b
--- /dev/null
+++ b/tools/testing/selftests/kvm/settings
@@ -0,0 +1 @@
+timeout=120
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 37b8a78f6b74..ca22ee6d19cb 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -99,6 +99,7 @@ int main(int argc, char *argv[])
exit(KSFT_SKIP);
}
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
vcpu_enable_evmcs(vm, VCPU_ID);
run = vcpu_state(vm, VCPU_ID);
@@ -142,7 +143,7 @@ int main(int argc, char *argv[])
/* Restore state in a new VM. */
kvm_vm_restart(vm, O_RDWR);
vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
vcpu_enable_evmcs(vm, VCPU_ID);
vcpu_load_state(vm, VCPU_ID, state);
run = vcpu_state(vm, VCPU_ID);
diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
new file mode 100644
index 000000000000..9b78e8889638
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021, Red Hat Inc.
+ *
+ * Generic tests for KVM CPUID set/get ioctls
+ */
+#include <asm/kvm_para.h>
+#include <linux/kvm_para.h>
+#include <stdint.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 0
+
+/* CPUIDs known to differ */
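+/*
+ * 0xd.0 reports XSAVE area sizes, which KVM recomputes from the enabled
+ * guest features, so it is expected to differ from the raw supported values.
+ */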
+struct {
+ u32 function;
+ u32 index;
+} mangled_cpuids[] = {
+ {.function = 0xd, .index = 0},
+};
+
+static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
+{
+ int i;
+ u32 eax, ebx, ecx, edx;
+
+ for (i = 0; i < guest_cpuid->nent; i++) {
+ eax = guest_cpuid->entries[i].function;
+ ecx = guest_cpuid->entries[i].index;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+
+ GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
+ ebx == guest_cpuid->entries[i].ebx &&
+ ecx == guest_cpuid->entries[i].ecx &&
+ edx == guest_cpuid->entries[i].edx);
+ }
+
+}
+
+static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid)
+{
+ u32 eax = 0x40000000, ebx, ecx = 0, edx;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+
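+ /*
+ * Leaf 0x40000000 reports the maximum hypervisor leaf in EAX; with KVM's
+ * default PV interface that is KVM_CPUID_FEATURES (0x40000001).
+ */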
+ GUEST_ASSERT(eax == 0x40000001);
+}
+
+static void guest_main(struct kvm_cpuid2 *guest_cpuid)
+{
+ GUEST_SYNC(1);
+
+ test_guest_cpuids(guest_cpuid);
+
+ GUEST_SYNC(2);
+
+ test_cpuid_40000000(guest_cpuid);
+
+ GUEST_DONE();
+}
+
+static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie)
+{
+ int i;
+
+ for (i = 0; i < sizeof(mangled_cpuids) / sizeof(mangled_cpuids[0]); i++) {
+ if (mangled_cpuids[i].function == entrie->function &&
+ mangled_cpuids[i].index == entrie->index)
+ return true;
+ }
+
+ return false;
+}
+
+static void check_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *entrie)
+{
+ int i;
+
+ for (i = 0; i < cpuid->nent; i++) {
+ if (cpuid->entries[i].function == entrie->function &&
+ cpuid->entries[i].index == entrie->index) {
+ if (is_cpuid_mangled(entrie))
+ return;
+
+ TEST_ASSERT(cpuid->entries[i].eax == entrie->eax &&
+ cpuid->entries[i].ebx == entrie->ebx &&
+ cpuid->entries[i].ecx == entrie->ecx &&
+ cpuid->entries[i].edx == entrie->edx,
+ "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
+ entrie->function, entrie->index,
+ cpuid->entries[i].eax, cpuid->entries[i].ebx,
+ cpuid->entries[i].ecx, cpuid->entries[i].edx,
+ entrie->eax, entrie->ebx, entrie->ecx, entrie->edx);
+ return;
+ }
+ }
+
+ TEST_ASSERT(false, "CPUID 0x%x.%x not found", entrie->function, entrie->index);
+}
+
+static void compare_cpuids(struct kvm_cpuid2 *cpuid1, struct kvm_cpuid2 *cpuid2)
+{
+ int i;
+
+ for (i = 0; i < cpuid1->nent; i++)
+ check_cpuid(cpuid2, &cpuid1->entries[i]);
+
+ for (i = 0; i < cpuid2->nent; i++)
+ check_cpuid(cpuid1, &cpuid2->entries[i]);
+}
+
+static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
+{
+ struct ucall uc;
+
+ _vcpu_run(vm, vcpuid);
+
+ switch (get_ucall(vm, vcpuid, &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+ uc.args[1] == stage + 1,
+ "Stage %d: Unexpected register values vmexit, got %lx",
+ stage + 1, (ulong)uc.args[1]);
+ return;
+ case UCALL_DONE:
+ return;
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", (const char *)uc.args[0],
+ __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+ default:
+ TEST_ASSERT(false, "Unexpected exit: %s",
+ exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+ }
+}
+
+struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
+{
+ int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
+ vm_vaddr_t gva = vm_vaddr_alloc(vm, size,
+ getpagesize(), 0, 0);
+ struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
+
+ memcpy(guest_cpuids, cpuid, size);
+
+ *p_gva = gva;
+ return guest_cpuids;
+}
+
+int main(void)
+{
+ struct kvm_cpuid2 *supp_cpuid, *cpuid2;
+ vm_vaddr_t cpuid_gva;
+ struct kvm_vm *vm;
+ int stage;
+
+ vm = vm_create_default(VCPU_ID, 0, guest_main);
+
+ supp_cpuid = kvm_get_supported_cpuid();
+ cpuid2 = vcpu_get_cpuid(vm, VCPU_ID);
+
+ compare_cpuids(supp_cpuid, cpuid2);
+
+ vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
+
+ vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva);
+
+ for (stage = 0; stage < 3; stage++)
+ run_vcpu(vm, VCPU_ID, stage);
+
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 88a595b7fbdd..7e2d2d17d2ed 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -125,30 +125,6 @@ void test_hv_cpuid_e2big(struct kvm_vm *vm, bool system)
" it should have: %d %d", system ? "KVM" : "vCPU", ret, errno);
}
-
-struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm, bool system)
-{
- int nent = 20; /* should be enough */
- static struct kvm_cpuid2 *cpuid;
-
- cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
-
- if (!cpuid) {
- perror("malloc");
- abort();
- }
-
- cpuid->nent = nent;
-
- if (!system)
- vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
- else
- kvm_ioctl(vm, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
- return cpuid;
-}
-
-
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
@@ -167,7 +143,7 @@ int main(int argc, char *argv[])
/* Test vCPU ioctl version */
test_hv_cpuid_e2big(vm, false);
- hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, false);
+ hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, VCPU_ID);
test_hv_cpuid(hv_cpuid_entries, false);
free(hv_cpuid_entries);
@@ -177,7 +153,7 @@ int main(int argc, char *argv[])
goto do_sys;
}
vcpu_enable_evmcs(vm, VCPU_ID);
- hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, false);
+ hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, VCPU_ID);
test_hv_cpuid(hv_cpuid_entries, true);
free(hv_cpuid_entries);
@@ -190,9 +166,8 @@ do_sys:
test_hv_cpuid_e2big(vm, true);
- hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm, true);
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid();
test_hv_cpuid(hv_cpuid_entries, nested_vmx_supported());
- free(hv_cpuid_entries);
out:
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
new file mode 100644
index 000000000000..23051d84b907
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VMX-pmu related msrs test
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Test to check the effect of various CPUID settings
+ * on the MSR_IA32_PERF_CAPABILITIES MSR; check that whatever
+ * we write with KVM_SET_MSR is _not_ modified by the guest and
+ * that it can be retrieved with KVM_GET_MSR.
+ *
+ * Test to check that invalid LBR formats are rejected.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <sys/ioctl.h>
+
+#include "kvm_util.h"
+#include "vmx.h"
+
+#define VCPU_ID 0
+
+#define X86_FEATURE_PDCM (1<<15)
+#define PMU_CAP_FW_WRITES (1ULL << 13)
+#define PMU_CAP_LBR_FMT 0x3f
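+/* The LBR format is encoded in bits 5:0 of MSR_IA32_PERF_CAPABILITIES. */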
+
+union cpuid10_eax {
+ struct {
+ unsigned int version_id:8;
+ unsigned int num_counters:8;
+ unsigned int bit_width:8;
+ unsigned int mask_length:8;
+ } split;
+ unsigned int full;
+};
+
+union perf_capabilities {
+ struct {
+ u64 lbr_format:6;
+ u64 pebs_trap:1;
+ u64 pebs_arch_reg:1;
+ u64 pebs_format:4;
+ u64 smm_freeze:1;
+ u64 full_width_write:1;
+ u64 pebs_baseline:1;
+ u64 perf_metrics:1;
+ u64 pebs_output_pt_available:1;
+ u64 anythread_deprecated:1;
+ };
+ u64 capabilities;
+};
+
+static void guest_code(void)
+{
+ wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid_entry2 *entry_1_0;
+ struct kvm_cpuid_entry2 *entry_a_0;
+ bool pdcm_supported = false;
+ struct kvm_vm *vm;
+ int ret;
+ union cpuid10_eax eax;
+ union perf_capabilities host_cap;
+
+ host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
+ host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ cpuid = kvm_get_supported_cpuid();
+
+ if (kvm_get_cpuid_max_basic() >= 0xa) {
+ entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
+ entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
+ pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM);
+ eax.full = entry_a_0->eax;
+ }
+ if (!pdcm_supported) {
+ print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU");
+ exit(KSFT_SKIP);
+ }
+ if (!eax.split.version_id) {
+ print_skip("PMU is not supported by the vCPU");
+ exit(KSFT_SKIP);
+ }
+
+ /* testcase 1, set capabilities when we have PDCM bit */
+ vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
+
+ /* check capabilities can be retrieved with KVM_GET_MSR */
+ ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+
+ /* check whatever we write with KVM_SET_MSR is _not_ modified */
+ vcpu_run(vm, VCPU_ID);
+ ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+
+ /* testcase 2, check valid LBR formats are accepted */
+ vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
+ ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
+
+ vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
+ ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
+
+ /* testcase 3, check invalid LBR format is rejected */
+ ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
+ TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
+
+ /* testcase 4, set capabilities when we don't have PDCM bit */
+ entry_1_0->ecx &= ~X86_FEATURE_PDCM;
+ vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
+ TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
+
+ /* testcase 5, set capabilities when we don't have PMU version bits */
+ entry_1_0->ecx |= X86_FEATURE_PDCM;
+ eax.split.version_id = 0;
+ entry_1_0->ecx = eax.full;
+ vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
+ TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
+
+ vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
+ ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
+
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
new file mode 100644
index 000000000000..2f964cdc273c
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xapic_ipi_test
+ *
+ * Copyright (C) 2020, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Test that when the APIC is in xAPIC mode, a vCPU can send an IPI to wake
+ * another vCPU that is halted when KVM's backing page for the APIC access
+ * address has been moved by mm.
+ *
+ * The test starts two vCPUs: one that sends IPIs and one that continually
+ * executes HLT. The sender checks that the halter has woken from the HLT and
+ * has reentered HLT before sending the next IPI. While the vCPUs are running,
+ * the host continually calls migrate_pages to move all of the process' pages
+ * amongst the available numa nodes on the machine.
+ *
+ * Migration is a command line option. When used on a non-numa machine, the
+ * test exits with an error. The test is still useful on non-numa machines
+ * for testing IPIs.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <getopt.h>
+#include <pthread.h>
+#include <inttypes.h>
+#include <string.h>
+#include <time.h>
+
+#include "kvm_util.h"
+#include "numaif.h"
+#include "processor.h"
+#include "test_util.h"
+#include "vmx.h"
+
+/* Default running time for the test */
+#define DEFAULT_RUN_SECS 3
+
+/* Default delay between migrate_pages calls (microseconds) */
+#define DEFAULT_DELAY_USECS 500000
+
+#define HALTER_VCPU_ID 0
+#define SENDER_VCPU_ID 1
+
+volatile uint32_t *apic_base = (volatile uint32_t *)APIC_DEFAULT_GPA;
+
+/*
+ * Vector for IPI from sender vCPU to halting vCPU.
+ * Value is arbitrary and was chosen for the alternating bit pattern. Any
+ * value should work.
+ */
+#define IPI_VECTOR 0xa5
+
+/*
+ * Incremented in the IPI handler. Provides evidence to the sender that the IPI
+ * arrived at the destination
+ */
+static volatile uint64_t ipis_rcvd;
+
+/* Data struct shared between host main thread and vCPUs */
+struct test_data_page {
+ uint32_t halter_apic_id;
+ volatile uint64_t hlt_count;
+ volatile uint64_t wake_count;
+ uint64_t ipis_sent;
+ uint64_t migrations_attempted;
+ uint64_t migrations_completed;
+ uint32_t icr;
+ uint32_t icr2;
+ uint32_t halter_tpr;
+ uint32_t halter_ppr;
+
+ /*
+ * Record local version register as a cross-check that APIC access
+ * worked. Value should match what KVM reports (APIC_VERSION in
+ * arch/x86/kvm/lapic.c). If test is failing, check that values match
+ * to determine whether APIC access exits are working.
+ */
+ uint32_t halter_lvr;
+};
+
+struct thread_params {
+ struct test_data_page *data;
+ struct kvm_vm *vm;
+ uint32_t vcpu_id;
+ uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */
+};
+
+uint32_t read_apic_reg(uint reg)
+{
+ return apic_base[reg >> 2];
+}
+
+void write_apic_reg(uint reg, uint32_t val)
+{
+ apic_base[reg >> 2] = val;
+}
+
+void disable_apic(void)
+{
+ wrmsr(MSR_IA32_APICBASE,
+ rdmsr(MSR_IA32_APICBASE) &
+ ~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD));
+}
+
+void enable_xapic(void)
+{
+ uint64_t val = rdmsr(MSR_IA32_APICBASE);
+
+ /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */
+ if (val & MSR_IA32_APICBASE_EXTD) {
+ disable_apic();
+ wrmsr(MSR_IA32_APICBASE,
+ rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE);
+ } else if (!(val & MSR_IA32_APICBASE_ENABLE)) {
+ wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE);
+ }
+
+ /*
+ * Per SDM: reset value of spurious interrupt vector register has the
+ * APIC software enabled bit=0. It must be enabled in addition to the
+ * enable bit in the MSR.
+ */
+ val = read_apic_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED;
+ write_apic_reg(APIC_SPIV, val);
+}
+
+void verify_apic_base_addr(void)
+{
+ uint64_t msr = rdmsr(MSR_IA32_APICBASE);
+ uint64_t base = GET_APIC_BASE(msr);
+
+ GUEST_ASSERT(base == APIC_DEFAULT_GPA);
+}
+
+static void halter_guest_code(struct test_data_page *data)
+{
+ verify_apic_base_addr();
+ enable_xapic();
+
+ data->halter_apic_id = GET_APIC_ID_FIELD(read_apic_reg(APIC_ID));
+ data->halter_lvr = read_apic_reg(APIC_LVR);
+
+ /*
+ * Loop forever HLTing and recording halts & wakes. Disable interrupts
+ * each time around to minimize window between signaling the pending
+ * halt to the sender vCPU and executing the halt. No need to disable on
+ * first run as this vCPU executes first and the host waits for it to
+ * signal going into first halt before starting the sender vCPU. Record
+ * TPR and PPR for diagnostic purposes in case the test fails.
+ */
+ for (;;) {
+ data->halter_tpr = read_apic_reg(APIC_TASKPRI);
+ data->halter_ppr = read_apic_reg(APIC_PROCPRI);
+ data->hlt_count++;
+ asm volatile("sti; hlt; cli");
+ data->wake_count++;
+ }
+}
+
+/*
+ * Runs on halter vCPU when IPI arrives. Write an arbitrary non-zero value to
+ * enable diagnosing errant writes to the APIC access address backing page in
+ * case of test failure.
+ */
+static void guest_ipi_handler(struct ex_regs *regs)
+{
+ ipis_rcvd++;
+ write_apic_reg(APIC_EOI, 77);
+}
+
+static void sender_guest_code(struct test_data_page *data)
+{
+ uint64_t last_wake_count;
+ uint64_t last_hlt_count;
+ uint64_t last_ipis_rcvd_count;
+ uint32_t icr_val;
+ uint32_t icr2_val;
+ uint64_t tsc_start;
+
+ verify_apic_base_addr();
+ enable_xapic();
+
+ /*
+ * Init interrupt command register for sending IPIs
+ *
+ * Delivery mode=fixed, per SDM:
+ * "Delivers the interrupt specified in the vector field to the target
+ * processor."
+ *
+ * Destination mode=physical i.e. specify target by its local APIC
+ * ID. This vCPU assumes that the halter vCPU has already started and
+ * set data->halter_apic_id.
+ */
+ icr_val = (APIC_DEST_PHYSICAL | APIC_DM_FIXED | IPI_VECTOR);
+ icr2_val = SET_APIC_DEST_FIELD(data->halter_apic_id);
+ data->icr = icr_val;
+ data->icr2 = icr2_val;
+
+ last_wake_count = data->wake_count;
+ last_hlt_count = data->hlt_count;
+ last_ipis_rcvd_count = ipis_rcvd;
+ for (;;) {
+ /*
+ * Send IPI to halter vCPU.
+ * First IPI can be sent unconditionally because halter vCPU
+ * starts earlier.
+ */
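+ /* ICR2 (destination) must be written first; the write to ICR is what triggers the IPI. */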
+ write_apic_reg(APIC_ICR2, icr2_val);
+ write_apic_reg(APIC_ICR, icr_val);
+ data->ipis_sent++;
+
+ /*
+ * Wait up to ~1 sec for halter to indicate that it has:
+ * 1. Received the IPI
+ * 2. Woken up from the halt
+ * 3. Gone back into halt
+ * Current CPUs typically run at 2.x GHz which is ~2
+ * billion ticks per second.
+ */
+ tsc_start = rdtsc();
+ while (rdtsc() - tsc_start < 2000000000) {
+ if ((ipis_rcvd != last_ipis_rcvd_count) &&
+ (data->wake_count != last_wake_count) &&
+ (data->hlt_count != last_hlt_count))
+ break;
+ }
+
+ GUEST_ASSERT((ipis_rcvd != last_ipis_rcvd_count) &&
+ (data->wake_count != last_wake_count) &&
+ (data->hlt_count != last_hlt_count));
+
+ last_wake_count = data->wake_count;
+ last_hlt_count = data->hlt_count;
+ last_ipis_rcvd_count = ipis_rcvd;
+ }
+}
+
+static void *vcpu_thread(void *arg)
+{
+ struct thread_params *params = (struct thread_params *)arg;
+ struct ucall uc;
+ int old;
+ int r;
+ unsigned int exit_reason;
+
+ r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
+ TEST_ASSERT(r == 0,
+ "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
+ params->vcpu_id, r);
+
+ fprintf(stderr, "vCPU thread running vCPU %u\n", params->vcpu_id);
+ vcpu_run(params->vm, params->vcpu_id);
+ exit_reason = vcpu_state(params->vm, params->vcpu_id)->exit_reason;
+
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
+ params->vcpu_id, exit_reason, exit_reason_str(exit_reason));
+
+ if (get_ucall(params->vm, params->vcpu_id, &uc) == UCALL_ABORT) {
+ TEST_ASSERT(false,
+ "vCPU %u exited with error: %s.\n"
+ "Sending vCPU sent %lu IPIs to halting vCPU\n"
+ "Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n"
+ "Halter TPR=%#x PPR=%#x LVR=%#x\n"
+ "Migrations attempted: %lu\n"
+ "Migrations completed: %lu\n",
+ params->vcpu_id, (const char *)uc.args[0],
+ params->data->ipis_sent, params->data->hlt_count,
+ params->data->wake_count,
+ *params->pipis_rcvd, params->data->halter_tpr,
+ params->data->halter_ppr, params->data->halter_lvr,
+ params->data->migrations_attempted,
+ params->data->migrations_completed);
+ }
+
+ return NULL;
+}
+
+static void cancel_join_vcpu_thread(pthread_t thread, uint32_t vcpu_id)
+{
+ void *retval;
+ int r;
+
+ r = pthread_cancel(thread);
+ TEST_ASSERT(r == 0,
+ "pthread_cancel on vcpu_id=%d failed with errno=%d",
+ vcpu_id, r);
+
+ r = pthread_join(thread, &retval);
+ TEST_ASSERT(r == 0,
+ "pthread_join on vcpu_id=%d failed with errno=%d",
+ vcpu_id, r);
+ TEST_ASSERT(retval == PTHREAD_CANCELED,
+ "expected retval=%p, got %p", PTHREAD_CANCELED,
+ retval);
+}
+
+void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
+ uint64_t *pipis_rcvd)
+{
+ long pages_not_moved;
+ unsigned long nodemask = 0;
+ unsigned long nodemasks[sizeof(nodemask) * 8];
+ int nodes = 0;
+ time_t start_time, last_update, now;
+ time_t interval_secs = 1;
+ int i, r;
+ int from, to;
+ unsigned long bit;
+ uint64_t hlt_count;
+ uint64_t wake_count;
+ uint64_t ipis_sent;
+
+ fprintf(stderr, "Calling migrate_pages every %d microseconds\n",
+ delay_usecs);
+
+ /* Get set of first 64 numa nodes available */
+ r = get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8,
+ 0, MPOL_F_MEMS_ALLOWED);
+ TEST_ASSERT(r == 0, "get_mempolicy failed errno=%d", errno);
+
+ fprintf(stderr, "Numa nodes found amongst first %lu possible nodes "
+ "(each 1-bit indicates node is present): %#lx\n",
+ sizeof(nodemask) * 8, nodemask);
+
+ /* Init array of masks containing a single-bit in each, one for each
+ * available node. migrate_pages called below requires specifying nodes
+ * as bit masks.
+ */
+ for (i = 0, bit = 1; i < sizeof(nodemask) * 8; i++, bit <<= 1) {
+ if (nodemask & bit) {
+ nodemasks[nodes] = nodemask & bit;
+ nodes++;
+ }
+ }
+
+ TEST_ASSERT(nodes > 1,
+ "Did not find at least 2 numa nodes. Can't do migration\n");
+
+ fprintf(stderr, "Migrating amongst %d nodes found\n", nodes);
+
+ from = 0;
+ to = 1;
+ start_time = time(NULL);
+ last_update = start_time;
+
+ ipis_sent = data->ipis_sent;
+ hlt_count = data->hlt_count;
+ wake_count = data->wake_count;
+
+ while ((int)(time(NULL) - start_time) < run_secs) {
+ data->migrations_attempted++;
+
+ /*
+ * migrate_pages with PID=0 will migrate all pages of this
+ * process between the nodes specified as bitmasks. The page
+ * backing the APIC access address belongs to this process
+ * because it is allocated by KVM in the context of the
+ * KVM_CREATE_VCPU ioctl. If that assumption ever changes this
+ * test may break or give a false positive signal.
+ */
+ pages_not_moved = migrate_pages(0, sizeof(nodemasks[from]),
+ &nodemasks[from],
+ &nodemasks[to]);
+ if (pages_not_moved < 0)
+ fprintf(stderr,
+ "migrate_pages failed, errno=%d\n", errno);
+ else if (pages_not_moved > 0)
+ fprintf(stderr,
+ "migrate_pages could not move %ld pages\n",
+ pages_not_moved);
+ else
+ data->migrations_completed++;
+
+ from = to;
+ to++;
+ if (to == nodes)
+ to = 0;
+
+ now = time(NULL);
+ if (((now - start_time) % interval_secs == 0) &&
+ (now != last_update)) {
+ last_update = now;
+ fprintf(stderr,
+ "%lu seconds: Migrations attempted=%lu completed=%lu, "
+ "IPIs sent=%lu received=%lu, HLTs=%lu wakes=%lu\n",
+ now - start_time, data->migrations_attempted,
+ data->migrations_completed,
+ data->ipis_sent, *pipis_rcvd,
+ data->hlt_count, data->wake_count);
+
+ TEST_ASSERT(ipis_sent != data->ipis_sent &&
+ hlt_count != data->hlt_count &&
+ wake_count != data->wake_count,
+ "IPI, HLT and wake count have not increased "
+ "in the last %lu seconds. "
+ "HLTer is likely hung.\n", interval_secs);
+
+ ipis_sent = data->ipis_sent;
+ hlt_count = data->hlt_count;
+ wake_count = data->wake_count;
+ }
+ usleep(delay_usecs);
+ }
+}
+
+void get_cmdline_args(int argc, char *argv[], int *run_secs,
+ bool *migrate, int *delay_usecs)
+{
+ for (;;) {
+ int opt = getopt(argc, argv, "s:d:m");
+
+ if (opt == -1)
+ break;
+ switch (opt) {
+ case 's':
+ *run_secs = parse_size(optarg);
+ break;
+ case 'm':
+ *migrate = true;
+ break;
+ case 'd':
+ *delay_usecs = parse_size(optarg);
+ break;
+ default:
+ TEST_ASSERT(false,
+ "Usage: -s <runtime seconds>. Default is %d seconds.\n"
+ "-m adds calls to migrate_pages while vCPUs are running."
+ " Default is no migrations.\n"
+ "-d <delay microseconds> - delay between migrate_pages() calls."
+ " Default is %d microseconds.\n",
+ DEFAULT_RUN_SECS, DEFAULT_DELAY_USECS);
+ }
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ int r;
+ int wait_secs;
+ const int max_halter_wait = 10;
+ int run_secs = 0;
+ int delay_usecs = 0;
+ struct test_data_page *data;
+ vm_vaddr_t test_data_page_vaddr;
+ bool migrate = false;
+ pthread_t threads[2];
+ struct thread_params params[2];
+ struct kvm_vm *vm;
+ uint64_t *pipis_rcvd;
+
+ get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs);
+ if (run_secs <= 0)
+ run_secs = DEFAULT_RUN_SECS;
+ if (delay_usecs <= 0)
+ delay_usecs = DEFAULT_DELAY_USECS;
+
+ vm = vm_create_default(HALTER_VCPU_ID, 0, halter_guest_code);
+ params[0].vm = vm;
+ params[1].vm = vm;
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID);
+ vm_handle_exception(vm, IPI_VECTOR, guest_ipi_handler);
+
+ virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA, 0);
+
+ vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
+
+ test_data_page_vaddr = vm_vaddr_alloc(vm, 0x1000, 0x1000, 0, 0);
+ data =
+ (struct test_data_page *)addr_gva2hva(vm, test_data_page_vaddr);
+ memset(data, 0, sizeof(*data));
+ params[0].data = data;
+ params[1].data = data;
+
+ vcpu_args_set(vm, HALTER_VCPU_ID, 1, test_data_page_vaddr);
+ vcpu_args_set(vm, SENDER_VCPU_ID, 1, test_data_page_vaddr);
+
+ pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd);
+ params[0].pipis_rcvd = pipis_rcvd;
+ params[1].pipis_rcvd = pipis_rcvd;
+
+ /* Start halter vCPU thread and wait for it to execute first HLT. */
+ params[0].vcpu_id = HALTER_VCPU_ID;
+ r = pthread_create(&threads[0], NULL, vcpu_thread, &params[0]);
+ TEST_ASSERT(r == 0,
+ "pthread_create halter failed errno=%d", errno);
+ fprintf(stderr, "Halter vCPU thread started\n");
+
+ wait_secs = 0;
+ while ((wait_secs < max_halter_wait) && !data->hlt_count) {
+ sleep(1);
+ wait_secs++;
+ }
+
+ TEST_ASSERT(data->hlt_count,
+ "Halter vCPU did not execute first HLT within %d seconds",
+ max_halter_wait);
+
+ fprintf(stderr,
+ "Halter vCPU thread reported its APIC ID: %u after %d seconds.\n",
+ data->halter_apic_id, wait_secs);
+
+ params[1].vcpu_id = SENDER_VCPU_ID;
+ r = pthread_create(&threads[1], NULL, vcpu_thread, &params[1]);
+ TEST_ASSERT(r == 0, "pthread_create sender failed errno=%d", errno);
+
+ fprintf(stderr,
+ "IPI sender vCPU thread started. Letting vCPUs run for %d seconds.\n",
+ run_secs);
+
+ if (!migrate)
+ sleep(run_secs);
+ else
+ do_migrations(data, run_secs, delay_usecs, pipis_rcvd);
+
+ /*
+ * Cancel threads and wait for them to stop.
+ */
+ cancel_join_vcpu_thread(threads[0], HALTER_VCPU_ID);
+ cancel_join_vcpu_thread(threads[1], SENDER_VCPU_ID);
+
+ fprintf(stderr,
+ "Test successful after running for %d seconds.\n"
+ "Sending vCPU sent %lu IPIs to halting vCPU\n"
+ "Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n"
+ "Halter APIC ID=%#x\n"
+ "Sender ICR value=%#x ICR2 value=%#x\n"
+ "Halter TPR=%#x PPR=%#x LVR=%#x\n"
+ "Migrations attempted: %lu\n"
+ "Migrations completed: %lu\n",
+ run_secs, data->ipis_sent,
+ data->hlt_count, data->wake_count, *pipis_rcvd,
+ data->halter_apic_id,
+ data->icr, data->icr2,
+ data->halter_tpr, data->halter_ppr, data->halter_lvr,
+ data->migrations_attempted, data->migrations_completed);
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
new file mode 100644
index 000000000000..9246ea310587
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_vmcall_test
+ *
+ * Copyright © 2021 Amazon.com, Inc. or its affiliates.
+ *
+ * Xen shared_info / pvclock testing
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#include <stdint.h>
+#include <time.h>
+
+#define VCPU_ID 5
+
+#define SHINFO_REGION_GPA 0xc0000000ULL
+#define SHINFO_REGION_SLOT 10
+#define PAGE_SIZE 4096
+
+#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
+
+static struct kvm_vm *vm;
+
+#define XEN_HYPERCALL_MSR 0x40000000
+
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 flags;
+ u8 pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+} __attribute__((__packed__));
+
+static void guest_code(void)
+{
+ GUEST_DONE();
+}
+
+static int cmp_timespec(struct timespec *a, struct timespec *b)
+{
+ if (a->tv_sec > b->tv_sec)
+ return 1;
+ else if (a->tv_sec < b->tv_sec)
+ return -1;
+ else if (a->tv_nsec > b->tv_nsec)
+ return 1;
+ else if (a->tv_nsec < b->tv_nsec)
+ return -1;
+ else
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct timespec min_ts, max_ts, vm_ts;
+
+ if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
+ KVM_XEN_HVM_CONFIG_SHARED_INFO)) {
+ print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available");
+ exit(KSFT_SKIP);
+ }
+
+ clock_gettime(CLOCK_REALTIME, &min_ts);
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ /* Map a region for the shared_info page */
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0);
+
+ struct kvm_xen_hvm_config hvmc = {
+ .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
+ .msr = XEN_HYPERCALL_MSR,
+ };
+ vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
+
+ struct kvm_xen_hvm_attr lm = {
+ .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
+ .u.long_mode = 1,
+ };
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm);
+
+ struct kvm_xen_hvm_attr ha = {
+ .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+ .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE,
+ };
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
+
+ struct kvm_xen_vcpu_attr vi = {
+ .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+ .u.gpa = SHINFO_REGION_GPA + 0x40,
+ };
+ vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi);
+
+ struct kvm_xen_vcpu_attr pvclock = {
+ .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+ .u.gpa = PVTIME_ADDR,
+ };
+ vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock);
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_FAIL("%s", (const char *)uc.args[0]);
+ /* NOT REACHED */
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+ }
+ }
+
+ done:
+ clock_gettime(CLOCK_REALTIME, &max_ts);
+
+ /*
+ * Just a *really* basic check that things are being put in the
+ * right place. The actual calculations are much the same for
+ * Xen as they are for the KVM variants, so no need to check.
+ */
+ struct pvclock_wall_clock *wc;
+ struct pvclock_vcpu_time_info *ti, *ti2;
+
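+ /*
+ * Offsets follow the 64-bit Xen shared_info layout: the wallclock sits at
+ * 0xc00 and the pvclock time info starts 0x20 bytes into the vcpu_info
+ * that was placed at offset 0x40 above.
+ */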
+ wc = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0xc00);
+ ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20);
+ ti2 = addr_gpa2hva(vm, PVTIME_ADDR);
+
+ vm_ts.tv_sec = wc->sec;
+ vm_ts.tv_nsec = wc->nsec;
+ TEST_ASSERT(wc->version && !(wc->version & 1),
+ "Bad wallclock version %x", wc->version);
+ TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old");
+ TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new");
+
+ TEST_ASSERT(ti->version && !(ti->version & 1),
+ "Bad time_info version %x", ti->version);
+ TEST_ASSERT(ti2->version && !(ti2->version & 1),
+ "Bad time_info version %x", ti2->version);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
new file mode 100644
index 000000000000..8389e0bfd711
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * xen_vmcall_test
+ *
+ * Copyright © 2020 Amazon.com, Inc. or its affiliates.
+ *
+ * Userspace hypercall testing
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 5
+
+#define HCALL_REGION_GPA 0xc0000000ULL
+#define HCALL_REGION_SLOT 10
+#define PAGE_SIZE 4096
+
+static struct kvm_vm *vm;
+
+#define INPUTVALUE 17
+#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x)
+#define RETVALUE 0xcafef00dfbfbffffUL
+
+#define XEN_HYPERCALL_MSR 0x40000200
+#define HV_GUEST_OS_ID_MSR 0x40000000
+#define HV_HYPERCALL_MSR 0x40000001
+
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HV_STATUS_INVALID_ALIGNMENT 4
+
+static void guest_code(void)
+{
+ unsigned long rax = INPUTVALUE;
+ unsigned long rdi = ARGVALUE(1);
+ unsigned long rsi = ARGVALUE(2);
+ unsigned long rdx = ARGVALUE(3);
+ unsigned long rcx;
+ register unsigned long r10 __asm__("r10") = ARGVALUE(4);
+ register unsigned long r8 __asm__("r8") = ARGVALUE(5);
+ register unsigned long r9 __asm__("r9") = ARGVALUE(6);
+
+ /* First a direct invocation of 'vmcall' */
+ __asm__ __volatile__("vmcall" :
+ "=a"(rax) :
+ "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
+ "r"(r10), "r"(r8), "r"(r9));
+ GUEST_ASSERT(rax == RETVALUE);
+
+ /* Fill in the Xen hypercall page */
+ __asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR),
+ "a" (HCALL_REGION_GPA & 0xffffffff),
+ "d" (HCALL_REGION_GPA >> 32));
+
+ /* Set Hyper-V Guest OS ID */
+ __asm__ __volatile__("wrmsr" : : "c" (HV_GUEST_OS_ID_MSR),
+ "a" (0x5a), "d" (0));
+
+ /* Hyper-V hypercall page */
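+ /* Bit 0 of the hypercall MSR is the enable bit, hence the "+ 1". */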
+ u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1;
+ __asm__ __volatile__("wrmsr" : : "c" (HV_HYPERCALL_MSR),
+ "a" (msrval & 0xffffffff),
+ "d" (msrval >> 32));
+
+ /* Invoke a Xen hypercall */
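+ /* Each hypercall owns a 32-byte stub in the page, so hypercall N sits at offset N * 32. */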
+ __asm__ __volatile__("call *%1" : "=a"(rax) :
+ "r"(HCALL_REGION_GPA + INPUTVALUE * 32),
+ "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
+ "r"(r10), "r"(r8), "r"(r9));
+ GUEST_ASSERT(rax == RETVALUE);
+
+ /* Invoke a Hyper-V hypercall */
+ rax = 0;
+ rcx = HVCALL_SIGNAL_EVENT; /* code */
+ rdx = 0x5a5a5a5a; /* ingpa (badly aligned) */
+ __asm__ __volatile__("call *%1" : "=a"(rax) :
+ "r"(HCALL_REGION_GPA + PAGE_SIZE),
+ "a"(rax), "c"(rcx), "d"(rdx),
+ "r"(r8));
+ GUEST_ASSERT(rax == HV_STATUS_INVALID_ALIGNMENT);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
+ KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)) {
+ print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ struct kvm_xen_hvm_config hvmc = {
+ .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
+ .msr = XEN_HYPERCALL_MSR,
+ };
+ vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
+
+ /* Map a region for the hypercall pages */
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
+ virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2, 0);
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+
+ if (run->exit_reason == KVM_EXIT_XEN) {
+ ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
+ ASSERT_EQ(run->xen.u.hcall.cpl, 0);
+ ASSERT_EQ(run->xen.u.hcall.longmode, 1);
+ ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
+ ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
+ ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
+ ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
+ ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
+ ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
+ ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
+ run->xen.u.hcall.result = RETVALUE;
+ continue;
+ }
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_FAIL("%s", (const char *)uc.args[0]);
+ /* NOT REACHED */
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+ }
+ }
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/mount_setattr/.gitignore b/tools/testing/selftests/mount_setattr/.gitignore
new file mode 100644
index 000000000000..5f74d8488472
--- /dev/null
+++ b/tools/testing/selftests/mount_setattr/.gitignore
@@ -0,0 +1 @@
+mount_setattr_test
diff --git a/tools/testing/selftests/mount_setattr/Makefile b/tools/testing/selftests/mount_setattr/Makefile
new file mode 100644
index 000000000000..2250f7dcb81e
--- /dev/null
+++ b/tools/testing/selftests/mount_setattr/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for mount selftests.
+CFLAGS = -g -I../../../../usr/include/ -Wall -O2 -pthread
+
+TEST_GEN_FILES += mount_setattr_test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/mount_setattr/config b/tools/testing/selftests/mount_setattr/config
new file mode 100644
index 000000000000..416bd53ce982
--- /dev/null
+++ b/tools/testing/selftests/mount_setattr/config
@@ -0,0 +1 @@
+CONFIG_USER_NS=y
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
new file mode 100644
index 000000000000..4e94e566e040
--- /dev/null
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <errno.h>
+#include <pthread.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <sys/vfs.h>
+#include <sys/statvfs.h>
+#include <sys/sysinfo.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#include "../kselftest_harness.h"
+
+#ifndef CLONE_NEWNS
+#define CLONE_NEWNS 0x00020000
+#endif
+
+#ifndef CLONE_NEWUSER
+#define CLONE_NEWUSER 0x10000000
+#endif
+
+#ifndef MS_REC
+#define MS_REC 16384
+#endif
+
+#ifndef MS_RELATIME
+#define MS_RELATIME (1 << 21)
+#endif
+
+#ifndef MS_STRICTATIME
+#define MS_STRICTATIME (1 << 24)
+#endif
+
+#ifndef MOUNT_ATTR_RDONLY
+#define MOUNT_ATTR_RDONLY 0x00000001
+#endif
+
+#ifndef MOUNT_ATTR_NOSUID
+#define MOUNT_ATTR_NOSUID 0x00000002
+#endif
+
+#ifndef MOUNT_ATTR_NOEXEC
+#define MOUNT_ATTR_NOEXEC 0x00000008
+#endif
+
+#ifndef MOUNT_ATTR_NODIRATIME
+#define MOUNT_ATTR_NODIRATIME 0x00000080
+#endif
+
+#ifndef MOUNT_ATTR__ATIME
+#define MOUNT_ATTR__ATIME 0x00000070
+#endif
+
+#ifndef MOUNT_ATTR_RELATIME
+#define MOUNT_ATTR_RELATIME 0x00000000
+#endif
+
+#ifndef MOUNT_ATTR_NOATIME
+#define MOUNT_ATTR_NOATIME 0x00000010
+#endif
+
+#ifndef MOUNT_ATTR_STRICTATIME
+#define MOUNT_ATTR_STRICTATIME 0x00000020
+#endif
+
+#ifndef AT_RECURSIVE
+#define AT_RECURSIVE 0x8000
+#endif
+
+#ifndef MS_SHARED
+#define MS_SHARED (1 << 20)
+#endif
+
+#define DEFAULT_THREADS 4
+#define ptr_to_int(p) ((int)((intptr_t)(p)))
+#define int_to_ptr(u) ((void *)((intptr_t)(u)))
+
+#ifndef __NR_mount_setattr
+ #if defined __alpha__
+ #define __NR_mount_setattr 552
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_mount_setattr (442 + 4000)
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_mount_setattr (442 + 6000)
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_mount_setattr (442 + 5000)
+ #endif
+ #elif defined __ia64__
+ #define __NR_mount_setattr (442 + 1024)
+ #else
+ #define __NR_mount_setattr 442
+ #endif
+
+struct mount_attr {
+ __u64 attr_set;
+ __u64 attr_clr;
+ __u64 propagation;
+ __u64 userns_fd;
+};
+#endif
+
+#ifndef __NR_open_tree
+ #if defined __alpha__
+ #define __NR_open_tree 538
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_open_tree 4428
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_open_tree 6428
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_open_tree 5428
+ #endif
+ #elif defined __ia64__
+ #define __NR_open_tree (428 + 1024)
+ #else
+ #define __NR_open_tree 428
+ #endif
+#endif
+
+#ifndef MOUNT_ATTR_IDMAP
+#define MOUNT_ATTR_IDMAP 0x00100000
+#endif
+
+static inline int sys_mount_setattr(int dfd, const char *path, unsigned int flags,
+ struct mount_attr *attr, size_t size)
+{
+ return syscall(__NR_mount_setattr, dfd, path, flags, attr, size);
+}
+
+#ifndef OPEN_TREE_CLONE
+#define OPEN_TREE_CLONE 1
+#endif
+
+#ifndef OPEN_TREE_CLOEXEC
+#define OPEN_TREE_CLOEXEC O_CLOEXEC
+#endif
+
+#ifndef AT_RECURSIVE
+#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+#endif
+
+static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags)
+{
+ return syscall(__NR_open_tree, dfd, filename, flags);
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+ int fd;
+ ssize_t ret;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+ if (fd < 0)
+ return -1;
+
+ ret = write_nointr(fd, buf, count);
+ close(fd);
+ if (ret < 0 || (size_t)ret != count)
+ return -1;
+
+ return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+ uid_t uid;
+ gid_t gid;
+ char map[100];
+
+ uid = getuid();
+ gid = getgid();
+
+ if (unshare(CLONE_NEWUSER))
+ return -1;
+
+ if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+ errno != ENOENT)
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", uid);
+ if (write_file("/proc/self/uid_map", map, strlen(map)))
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", gid);
+ if (write_file("/proc/self/gid_map", map, strlen(map)))
+ return -1;
+
+ if (setgid(0))
+ return -1;
+
+ if (setuid(0))
+ return -1;
+
+ return 0;
+}
+
+static int prepare_unpriv_mountns(void)
+{
+ if (create_and_enter_userns())
+ return -1;
+
+ if (unshare(CLONE_NEWNS))
+ return -1;
+
+ if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0))
+ return -1;
+
+ return 0;
+}
+
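+/*
+ * Derive the mount's MS_* flags from statvfs(); returns -EINVAL if an
+ * unexpected ST_* bit shows up.
+ */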
+static int read_mnt_flags(const char *path)
+{
+ int ret;
+ struct statvfs stat;
+ unsigned int mnt_flags;
+
+ ret = statvfs(path, &stat);
+ if (ret != 0)
+ return -EINVAL;
+
+ if (stat.f_flag &
+ ~(ST_RDONLY | ST_NOSUID | ST_NODEV | ST_NOEXEC | ST_NOATIME |
+ ST_NODIRATIME | ST_RELATIME | ST_SYNCHRONOUS | ST_MANDLOCK))
+ return -EINVAL;
+
+ mnt_flags = 0;
+ if (stat.f_flag & ST_RDONLY)
+ mnt_flags |= MS_RDONLY;
+ if (stat.f_flag & ST_NOSUID)
+ mnt_flags |= MS_NOSUID;
+ if (stat.f_flag & ST_NODEV)
+ mnt_flags |= MS_NODEV;
+ if (stat.f_flag & ST_NOEXEC)
+ mnt_flags |= MS_NOEXEC;
+ if (stat.f_flag & ST_NOATIME)
+ mnt_flags |= MS_NOATIME;
+ if (stat.f_flag & ST_NODIRATIME)
+ mnt_flags |= MS_NODIRATIME;
+ if (stat.f_flag & ST_RELATIME)
+ mnt_flags |= MS_RELATIME;
+ if (stat.f_flag & ST_SYNCHRONOUS)
+ mnt_flags |= MS_SYNCHRONOUS;
+ if (stat.f_flag & ST_MANDLOCK)
+ mnt_flags |= ST_MANDLOCK;
+
+ return mnt_flags;
+}
+
+static char *get_field(char *src, int nfields)
+{
+ int i;
+ char *p = src;
+
+ for (i = 0; i < nfields; i++) {
+ while (*p && *p != ' ' && *p != '\t')
+ p++;
+
+ if (!*p)
+ break;
+
+ p++;
+ }
+
+ return p;
+}
+
+static void null_endofword(char *word)
+{
+ while (*word && *word != ' ' && *word != '\t')
+ word++;
+ *word = '\0';
+}
+
+static bool is_shared_mount(const char *path)
+{
+ size_t len = 0;
+ char *line = NULL;
+ FILE *f = NULL;
+
+ f = fopen("/proc/self/mountinfo", "re");
+ if (!f)
+ return false;
+
+ while (getline(&line, &len, f) != -1) {
+ char *opts, *target;
+
+ target = get_field(line, 4);
+ if (!target)
+ continue;
+
+ opts = get_field(target, 2);
+ if (!opts)
+ continue;
+
+ null_endofword(target);
+
+ if (strcmp(target, path) != 0)
+ continue;
+
+ null_endofword(opts);
+ if (strstr(opts, "shared:"))
+ return true;
+ }
+
+ free(line);
+ fclose(f);
+
+ return false;
+}
+
+static void *mount_setattr_thread(void *data)
+{
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
+ .attr_clr = 0,
+ .propagation = MS_SHARED,
+ };
+
+ if (sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)))
+ pthread_exit(int_to_ptr(-1));
+
+ pthread_exit(int_to_ptr(0));
+}
+
+/* Attempt to de-conflict with the selftests tree. */
+#ifndef SKIP
+#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
+#endif
+
+static bool mount_setattr_supported(void)
+{
+ int ret;
+
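+ /* Only ENOSYS means the syscall is missing; any other error still proves it exists. */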
+ ret = sys_mount_setattr(-EBADF, "", AT_EMPTY_PATH, NULL, 0);
+ if (ret < 0 && errno == ENOSYS)
+ return false;
+
+ return true;
+}
+
+FIXTURE(mount_setattr) {
+};
+
+FIXTURE_SETUP(mount_setattr)
+{
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ ASSERT_EQ(prepare_unpriv_mountns(), 0);
+
+ (void)umount2("/mnt", MNT_DETACH);
+ (void)umount2("/tmp", MNT_DETACH);
+
+ ASSERT_EQ(mount("testing", "/tmp", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/tmp/B", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/tmp/B", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/mnt/A", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/mnt/A/AA", 0777), 0);
+
+ ASSERT_EQ(mount("/tmp", "/mnt/A/AA", NULL, MS_BIND | MS_REC, NULL), 0);
+
+ ASSERT_EQ(mkdir("/mnt/B", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/mnt/B", "ramfs",
+ MS_NOATIME | MS_NODEV | MS_NOSUID, 0), 0);
+
+ ASSERT_EQ(mkdir("/mnt/B/BB", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/tmp/B/BB", "devpts",
+ MS_RELATIME | MS_NOEXEC | MS_RDONLY, 0), 0);
+}
+
+FIXTURE_TEARDOWN(mount_setattr)
+{
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ (void)umount2("/mnt/A", MNT_DETACH);
+ (void)umount2("/tmp", MNT_DETACH);
+}
+
+TEST_F(mount_setattr, invalid_attributes)
+{
+ struct mount_attr invalid_attr = {
+ .attr_set = (1U << 31),
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr)), 0);
+
+ invalid_attr.attr_set = 0;
+ invalid_attr.attr_clr = (1U << 31);
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr)), 0);
+
+ invalid_attr.attr_clr = 0;
+ invalid_attr.propagation = (1U << 31);
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr)), 0);
+
+ invalid_attr.attr_set = (1U << 31);
+ invalid_attr.attr_clr = (1U << 31);
+ invalid_attr.propagation = (1U << 31);
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr)), 0);
+
+ ASSERT_NE(sys_mount_setattr(-1, "mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr)), 0);
+}
+
+TEST_F(mount_setattr, extensibility)
+{
+ unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
+ char *s = "dummy";
+ struct mount_attr invalid_attr = {};
+ struct mount_attr_large {
+ struct mount_attr attr1;
+ struct mount_attr attr2;
+ struct mount_attr attr3;
+ } large_attr = {};
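+ /*
+ * A struct larger than the kernel's must only be accepted while all
+ * trailing bytes are zero; any set bit in the unknown tail has to be
+ * rejected.
+ */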
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, NULL,
+ sizeof(invalid_attr)), 0);
+ ASSERT_EQ(errno, EFAULT);
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, (void *)s,
+ sizeof(invalid_attr)), 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr, 0), 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr) / 2), 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &invalid_attr,
+ sizeof(invalid_attr) / 2), 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE,
+ (void *)&large_attr, sizeof(large_attr)), 0);
+
+ large_attr.attr3.attr_set = MOUNT_ATTR_RDONLY;
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE,
+ (void *)&large_attr, sizeof(large_attr)), 0);
+
+ large_attr.attr3.attr_set = 0;
+ large_attr.attr1.attr_set = MOUNT_ATTR_RDONLY;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE,
+ (void *)&large_attr, sizeof(large_attr)), 0);
+
+ expected_flags = old_flags;
+ expected_flags |= MS_RDONLY;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+}
+
+TEST_F(mount_setattr, basic)
+{
+ unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOEXEC | MOUNT_ATTR_RELATIME,
+ .attr_clr = MOUNT_ATTR__ATIME,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", 0, &attr, sizeof(attr)), 0);
+
+ expected_flags = old_flags;
+ expected_flags |= MS_RDONLY;
+ expected_flags |= MS_NOEXEC;
+ expected_flags &= ~MS_NOATIME;
+ expected_flags |= MS_RELATIME;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, old_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, old_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, old_flags);
+}
+
+TEST_F(mount_setattr, basic_recursive)
+{
+ int fd;
+ unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOEXEC | MOUNT_ATTR_RELATIME,
+ .attr_clr = MOUNT_ATTR__ATIME,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags = old_flags;
+ expected_flags |= MS_RDONLY;
+ expected_flags |= MS_NOEXEC;
+ expected_flags &= ~MS_NOATIME;
+ expected_flags |= MS_RELATIME;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr_clr = MOUNT_ATTR_RDONLY;
+ attr.propagation = MS_SHARED;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags &= ~MS_RDONLY;
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);
+
+ fd = open("/mnt/A/AA/B/b", O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0777);
+ ASSERT_GE(fd, 0);
+
+ /*
+ * We're holding a fd open for writing so this needs to fail somewhere
+ * in the middle and the mount options need to be unchanged.
+ */
+ attr.attr_set = MOUNT_ATTR_RDONLY;
+ ASSERT_LT(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);
+
+ EXPECT_EQ(close(fd), 0);
+}
+
+TEST_F(mount_setattr, mount_has_writers)
+{
+ int fd, dfd;
+ unsigned int old_flags = 0, new_flags = 0;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOEXEC | MOUNT_ATTR_RELATIME,
+ .attr_clr = MOUNT_ATTR__ATIME,
+ .propagation = MS_SHARED,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ fd = open("/mnt/A/AA/B/b", O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL, 0777);
+ ASSERT_GE(fd, 0);
+
+ /*
+ * We're holding an fd open to a mount somewhere in the middle so this
+ * needs to fail somewhere in the middle. After this the mount options
+ * need to be unchanged.
+ */
+ ASSERT_LT(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, old_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A"), false);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, old_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA"), false);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, old_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), false);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, old_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), false);
+
+ dfd = open("/mnt/A/AA/B", O_DIRECTORY | O_CLOEXEC);
+ ASSERT_GE(dfd, 0);
+ EXPECT_EQ(fsync(dfd), 0);
+ EXPECT_EQ(close(dfd), 0);
+
+ EXPECT_EQ(fsync(fd), 0);
+ EXPECT_EQ(close(fd), 0);
+
+ /* All writers are gone so this should succeed. */
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+}
+
+TEST_F(mount_setattr, mixed_mount_options)
+{
+ unsigned int old_flags1 = 0, old_flags2 = 0, new_flags = 0, expected_flags = 0;
+ struct mount_attr attr = {
+ .attr_clr = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME,
+ .attr_set = MOUNT_ATTR_RELATIME,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ old_flags1 = read_mnt_flags("/mnt/B");
+ ASSERT_GT(old_flags1, 0);
+
+ old_flags2 = read_mnt_flags("/mnt/B/BB");
+ ASSERT_GT(old_flags2, 0);
+
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/B", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags = old_flags1;
+ expected_flags &= ~(MS_RDONLY | MS_NOEXEC | MS_NOATIME | MS_NOSUID);
+ expected_flags |= MS_RELATIME;
+
+ new_flags = read_mnt_flags("/mnt/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ expected_flags = old_flags2;
+ expected_flags &= ~(MS_RDONLY | MS_NOEXEC | MS_NOATIME | MS_NOSUID);
+ expected_flags |= MS_RELATIME;
+
+ new_flags = read_mnt_flags("/mnt/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+}
+
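+/**
+ * Validate that invalid atime flag combinations are rejected and that valid
+ * ones toggle the expected MS_* flags across the whole mount tree.
+ */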
+TEST_F(mount_setattr, time_changes)
+{
+ unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_NODIRATIME | MOUNT_ATTR_NOATIME,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ attr.attr_set = MOUNT_ATTR_STRICTATIME;
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ attr.attr_set = MOUNT_ATTR_STRICTATIME | MOUNT_ATTR_NOATIME;
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ attr.attr_set = MOUNT_ATTR_STRICTATIME | MOUNT_ATTR_NOATIME;
+ attr.attr_clr = MOUNT_ATTR__ATIME;
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ attr.attr_set = 0;
+ attr.attr_clr = MOUNT_ATTR_STRICTATIME;
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ attr.attr_clr = MOUNT_ATTR_NOATIME;
+ ASSERT_NE(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ attr.attr_set = MOUNT_ATTR_NODIRATIME | MOUNT_ATTR_NOATIME;
+ attr.attr_clr = MOUNT_ATTR__ATIME;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags = old_flags;
+ expected_flags |= MS_NOATIME;
+ expected_flags |= MS_NODIRATIME;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr_set &= ~MOUNT_ATTR_NOATIME;
+ attr.attr_set |= MOUNT_ATTR_RELATIME;
+ attr.attr_clr |= MOUNT_ATTR__ATIME;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags &= ~MS_NOATIME;
+ expected_flags |= MS_RELATIME;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr_set &= ~MOUNT_ATTR_RELATIME;
+ attr.attr_set |= MOUNT_ATTR_STRICTATIME;
+ attr.attr_clr |= MOUNT_ATTR__ATIME;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags &= ~MS_RELATIME;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr_set &= ~MOUNT_ATTR_STRICTATIME;
+ attr.attr_set |= MOUNT_ATTR_NOATIME;
+ attr.attr_clr |= MOUNT_ATTR__ATIME;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags |= MS_NOATIME;
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ memset(&attr, 0, sizeof(attr));
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr_clr = MOUNT_ATTR_NODIRATIME;
+ ASSERT_EQ(sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr)), 0);
+
+ expected_flags &= ~MS_NODIRATIME;
+
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+}
+
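+/**
+ * Change mount properties from multiple threads in parallel and verify that
+ * the final state is consistent across the whole mount tree.
+ */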
+TEST_F(mount_setattr, multi_threaded)
+{
+ int i, j, nthreads, ret = 0;
+ unsigned int old_flags = 0, new_flags = 0, expected_flags = 0;
+ pthread_attr_t pattr;
+ pthread_t threads[DEFAULT_THREADS];
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ old_flags = read_mnt_flags("/mnt/A");
+ ASSERT_GT(old_flags, 0);
+
+ /* Try to change mount options from multiple threads. */
+ nthreads = get_nprocs_conf();
+ if (nthreads > DEFAULT_THREADS)
+ nthreads = DEFAULT_THREADS;
+
+ pthread_attr_init(&pattr);
+ for (i = 0; i < nthreads; i++)
+ ASSERT_EQ(pthread_create(&threads[i], &pattr, mount_setattr_thread, NULL), 0);
+
+ for (j = 0; j < i; j++) {
+ void *retptr = NULL;
+
+ EXPECT_EQ(pthread_join(threads[j], &retptr), 0);
+
+ ret += ptr_to_int(retptr);
+ EXPECT_EQ(ret, 0);
+ }
+ pthread_attr_destroy(&pattr);
+
+ ASSERT_EQ(ret, 0);
+
+ expected_flags = old_flags;
+ expected_flags |= MS_RDONLY;
+ expected_flags |= MS_NOSUID;
+ new_flags = read_mnt_flags("/mnt/A");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B"), true);
+
+ new_flags = read_mnt_flags("/mnt/A/AA/B/BB");
+ ASSERT_EQ(new_flags, expected_flags);
+
+ ASSERT_EQ(is_shared_mount("/mnt/A/AA/B/BB"), true);
+}
+
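+/**
+ * Validate that changing mount properties fails with EPERM after entering a
+ * new user namespace.
+ */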
+TEST_F(mount_setattr, wrong_user_namespace)
+{
+ int ret;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_RDONLY,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ EXPECT_EQ(create_and_enter_userns(), 0);
+ ret = sys_mount_setattr(-1, "/mnt/A", AT_RECURSIVE, &attr, sizeof(attr));
+ ASSERT_LT(ret, 0);
+ ASSERT_EQ(errno, EPERM);
+}
+
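+/**
+ * Validate that changing mount properties on an attached mount fails with
+ * EINVAL after the caller has moved into a new mount namespace.
+ */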
+TEST_F(mount_setattr, wrong_mount_namespace)
+{
+ int fd, ret;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_RDONLY,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ fd = open("/mnt/A", O_DIRECTORY | O_CLOEXEC);
+ ASSERT_GE(fd, 0);
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ ret = sys_mount_setattr(fd, "", AT_EMPTY_PATH | AT_RECURSIVE, &attr, sizeof(attr));
+ ASSERT_LT(ret, 0);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+FIXTURE(mount_setattr_idmapped) {
+};
+
+FIXTURE_SETUP(mount_setattr_idmapped)
+{
+ int img_fd = -EBADF;
+
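+ /*
+ * Build the tmpfs/ramfs/devpts mount tree plus a loop-mounted ext4
+ * filesystem at /mnt/D that the idmapped mount tests operate on.
+ */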
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ ASSERT_EQ(mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0), 0);
+
+ (void)umount2("/mnt", MNT_DETACH);
+ (void)umount2("/tmp", MNT_DETACH);
+
+ ASSERT_EQ(mount("testing", "/tmp", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/tmp/B", 0777), 0);
+ ASSERT_EQ(mknodat(-EBADF, "/tmp/B/b", S_IFREG | 0644, 0), 0);
+ ASSERT_EQ(chown("/tmp/B/b", 0, 0), 0);
+
+ ASSERT_EQ(mount("testing", "/tmp/B", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);
+ ASSERT_EQ(mknodat(-EBADF, "/tmp/B/BB/b", S_IFREG | 0644, 0), 0);
+ ASSERT_EQ(chown("/tmp/B/BB/b", 0, 0), 0);
+
+ ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/mnt/A", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/mnt/A", "tmpfs", MS_NOATIME | MS_NODEV,
+ "size=100000,mode=700"), 0);
+
+ ASSERT_EQ(mkdir("/mnt/A/AA", 0777), 0);
+
+ ASSERT_EQ(mount("/tmp", "/mnt/A/AA", NULL, MS_BIND | MS_REC, NULL), 0);
+
+ ASSERT_EQ(mkdir("/mnt/B", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/mnt/B", "ramfs",
+ MS_NOATIME | MS_NODEV | MS_NOSUID, 0), 0);
+
+ ASSERT_EQ(mkdir("/mnt/B/BB", 0777), 0);
+
+ ASSERT_EQ(mount("testing", "/tmp/B/BB", "devpts",
+ MS_RELATIME | MS_NOEXEC | MS_RDONLY, 0), 0);
+
+ ASSERT_EQ(mkdir("/mnt/C", 0777), 0);
+ ASSERT_EQ(mkdir("/mnt/D", 0777), 0);
+ img_fd = openat(-EBADF, "/mnt/C/ext4.img", O_CREAT | O_WRONLY, 0600);
+ ASSERT_GE(img_fd, 0);
+ ASSERT_EQ(ftruncate(img_fd, 1024 * 2048), 0);
+ ASSERT_EQ(system("mkfs.ext4 -q /mnt/C/ext4.img"), 0);
+ ASSERT_EQ(system("mount -o loop -t ext4 /mnt/C/ext4.img /mnt/D/"), 0);
+ ASSERT_EQ(close(img_fd), 0);
+}
+
+FIXTURE_TEARDOWN(mount_setattr_idmapped)
+{
+ (void)umount2("/mnt/A", MNT_DETACH);
+ (void)umount2("/tmp", MNT_DETACH);
+}
+
+/**
+ * Validate that negative fd values are rejected.
+ */
+TEST_F(mount_setattr_idmapped, invalid_fd_negative)
+{
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ .userns_fd = -EBADF,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ ASSERT_NE(sys_mount_setattr(-1, "/", 0, &attr, sizeof(attr)), 0) {
+ TH_LOG("failure: created idmapped mount with negative fd");
+ }
+}
+
+/**
+ * Validate that excessively large fd values are rejected.
+ */
+TEST_F(mount_setattr_idmapped, invalid_fd_large)
+{
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ .userns_fd = INT64_MAX,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ ASSERT_NE(sys_mount_setattr(-1, "/", 0, &attr, sizeof(attr)), 0) {
+ TH_LOG("failure: created idmapped mount with too large fd value");
+ }
+}
+
+/**
+ * Validate that closed fd values are rejected.
+ */
+TEST_F(mount_setattr_idmapped, invalid_fd_closed)
+{
+ int fd;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+ ASSERT_GE(fd, 0);
+ ASSERT_GE(close(fd), 0);
+
+ attr.userns_fd = fd;
+ ASSERT_NE(sys_mount_setattr(-1, "/", 0, &attr, sizeof(attr)), 0) {
+ TH_LOG("failure: created idmapped mount with closed fd");
+ }
+}
+
+/**
+ * Validate that the initial user namespace is rejected.
+ */
+TEST_F(mount_setattr_idmapped, invalid_fd_initial_userns)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(open_tree_fd, 0);
+
+ attr.userns_fd = open("/proc/1/ns/user", O_RDONLY | O_CLOEXEC);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(errno, EPERM);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+}
+
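+/*
+ * Write identical uid and gid mappings for @pid: map @range ids starting at
+ * @nsid inside the namespace to ids starting at @hostid on the host.
+ */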
+static int map_ids(pid_t pid, unsigned long nsid, unsigned long hostid,
+ unsigned long range)
+{
+ char map[100], procfile[256];
+
+ snprintf(procfile, sizeof(procfile), "/proc/%d/uid_map", pid);
+ snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
+ if (write_file(procfile, map, strlen(map)))
+ return -1;
+
+ snprintf(procfile, sizeof(procfile), "/proc/%d/gid_map", pid);
+ snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
+ if (write_file(procfile, map, strlen(map)))
+ return -1;
+
+ return 0;
+}
+
+#define __STACK_SIZE (8 * 1024 * 1024)
+static pid_t do_clone(int (*fn)(void *), void *arg, int flags)
+{
+ void *stack;
+
+ stack = malloc(__STACK_SIZE);
+ if (!stack)
+ return -ENOMEM;
+
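+ /*
+ * The stack grows down on most architectures, so pass the top of the
+ * allocation; ia64's __clone2() takes the stack base and size instead.
+ */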
+#ifdef __ia64__
+ return __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#else
+ return clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#endif
+}
+
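+/* Child helper: stop ourselves so the parent can write our id mappings. */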
+static int get_userns_fd_cb(void *data)
+{
+ return kill(getpid(), SIGSTOP);
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
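+/*
+ * Create a child in a new user namespace, install the requested id mappings,
+ * and return an fd referring to that user namespace.
+ */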
+static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long range)
+{
+ int ret;
+ pid_t pid;
+ char path[256];
+
+ pid = do_clone(get_userns_fd_cb, NULL, CLONE_NEWUSER);
+ if (pid < 0)
+ return -errno;
+
+ ret = map_ids(pid, nsid, hostid, range);
+ if (ret < 0)
+ return ret;
+
+ snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
+ ret = open(path, O_RDONLY | O_CLOEXEC);
+ kill(pid, SIGKILL);
+ wait_for_pid(pid);
+ return ret;
+}
+
+/**
+ * Validate that an attached mount in our mount namespace can be idmapped.
+ * (The kernel enforces that the mount's mount namespace and the caller's mount
+ * namespace match.)
+ */
+TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
+ AT_EMPTY_PATH |
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC);
+ ASSERT_GE(open_tree_fd, 0);
+
+ attr.userns_fd = get_userns_fd(0, 10000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+}
+
+/**
+ * Validate that idmapping a mount is rejected if the mount's mount namespace
+ * and our mount namespace don't match.
+ * (The kernel enforces that the mount's mount namespace and the caller's mount
+ * namespace match.)
+ */
+TEST_F(mount_setattr_idmapped, attached_mount_outside_current_mount_namespace)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
+ AT_EMPTY_PATH |
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC);
+ ASSERT_GE(open_tree_fd, 0);
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ attr.userns_fd = get_userns_fd(0, 10000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr,
+ sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+}
+
+/**
+ * Validate that an attached mount in our mount namespace can be idmapped.
+ */
+TEST_F(mount_setattr_idmapped, detached_mount_inside_current_mount_namespace)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
+ AT_EMPTY_PATH |
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(open_tree_fd, 0);
+
+ /* Changing mount properties on a detached mount. */
+ attr.userns_fd = get_userns_fd(0, 10000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_EQ(sys_mount_setattr(open_tree_fd, "",
+ AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+}
+
+/**
+ * Validate that a detached mount not in our mount namespace can be idmapped.
+ */
+TEST_F(mount_setattr_idmapped, detached_mount_outside_current_mount_namespace)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
+ AT_EMPTY_PATH |
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(open_tree_fd, 0);
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ /* Changing mount properties on a detached mount. */
+ attr.userns_fd = get_userns_fd(0, 10000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_EQ(sys_mount_setattr(open_tree_fd, "",
+ AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+}
+
+/**
+ * Validate that changing the idmapping of an already idmapped mount currently fails.
+ */
+TEST_F(mount_setattr_idmapped, change_idmapping)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/D",
+ AT_EMPTY_PATH |
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(open_tree_fd, 0);
+
+ attr.userns_fd = get_userns_fd(0, 10000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_EQ(sys_mount_setattr(open_tree_fd, "",
+ AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+
+ /* Change idmapping on a detached mount that is already idmapped. */
+ attr.userns_fd = get_userns_fd(0, 20000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+}
+
+static bool expected_uid_gid(int dfd, const char *path, int flags,
+ uid_t expected_uid, gid_t expected_gid)
+{
+ int ret;
+ struct stat st;
+
+ ret = fstatat(dfd, path, &st, flags);
+ if (ret < 0)
+ return false;
+
+ return st.st_uid == expected_uid && st.st_gid == expected_gid;
+}
+
+TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
+{
+ int open_tree_fd = -EBADF;
+ struct mount_attr attr = {
+ .attr_set = MOUNT_ATTR_IDMAP,
+ };
+
+ if (!mount_setattr_supported())
+ SKIP(return, "mount_setattr syscall not supported");
+
+ ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/b", 0, 0, 0), 0);
+ ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/BB/b", 0, 0, 0), 0);
+
+ open_tree_fd = sys_open_tree(-EBADF, "/mnt/A",
+ AT_RECURSIVE |
+ AT_EMPTY_PATH |
+ AT_NO_AUTOMOUNT |
+ AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(open_tree_fd, 0);
+
+ attr.userns_fd = get_userns_fd(0, 10000, 10000);
+ ASSERT_GE(attr.userns_fd, 0);
+ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_EQ(close(attr.userns_fd), 0);
+ ASSERT_EQ(close(open_tree_fd), 0);
+
+ ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/b", 0, 0, 0), 0);
+ ASSERT_EQ(expected_uid_gid(-EBADF, "/tmp/B/BB/b", 0, 0, 0), 0);
+ ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/b", 0, 0, 0), 0);
+ ASSERT_EQ(expected_uid_gid(open_tree_fd, "B/BB/b", 0, 0, 0), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
index 0d783e1065c8..442b666ccdb5 100755
--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
@@ -1,28 +1,13 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
-KSELFTESTS_SKIP=4
-
. ./eeh-functions.sh
-if ! eeh_supported ; then
- echo "EEH not supported on this system, skipping"
- exit $KSELFTESTS_SKIP;
-fi
-
-if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
- [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
- echo "debugfs EEH testing files are missing. Is debugfs mounted?"
- exit $KSELFTESTS_SKIP;
-fi
+eeh_test_prep # NB: may exit
pre_lspci=`mktemp`
lspci > $pre_lspci
-# Bump the max freeze count to something absurd so we don't
-# trip over it while breaking things.
-echo 5000 > /sys/kernel/debug/powerpc/eeh_max_freezes
-
# record the devices that we break in here. Assuming everything
# goes to plan we should get them back once the recover process
# is finished.
@@ -30,34 +15,16 @@ devices=""
# Build up a list of candidate devices.
for dev in `ls -1 /sys/bus/pci/devices/ | grep '\.0$'` ; do
- # skip bridges since we can't recover them (yet...)
- if [ -e "/sys/bus/pci/devices/$dev/pci_bus" ] ; then
- echo "$dev, Skipped: bridge"
+ if ! eeh_can_break $dev ; then
continue;
fi
- # Skip VFs for now since we don't have a reliable way
- # to break them.
+ # Skip VFs for now since we don't have a reliable way to break them.
if [ -e "/sys/bus/pci/devices/$dev/physfn" ] ; then
echo "$dev, Skipped: virtfn"
continue;
fi
- if [ "ahci" = "$(basename $(realpath /sys/bus/pci/devices/$dev/driver))" ] ; then
- echo "$dev, Skipped: ahci doesn't support recovery"
- continue
- fi
-
- # Don't inject errosr into an already-frozen PE. This happens with
- # PEs that contain multiple PCI devices (e.g. multi-function cards)
- # and injecting new errors during the recovery process will probably
- # result in the recovery failing and the device being marked as
- # failed.
- if ! pe_ok $dev ; then
- echo "$dev, Skipped: Bad initial PE state"
- continue;
- fi
-
echo "$dev, Added"
# Add to this list of device to check
@@ -86,5 +53,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
lspci | diff -u $pre_lspci -
rm -f $pre_lspci
-test "$failed" == 0
+test "$failed" -eq 0
exit $?
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
index 00dc32c0ed75..70daa3925dcb 100755..100644
--- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
@@ -1,6 +1,12 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
+export KSELFTESTS_SKIP=4
+
+log() {
+ echo "$*" >&2
+}
+
pe_ok() {
local dev="$1"
local path="/sys/bus/pci/devices/$dev/eeh_pe_state"
@@ -39,6 +45,52 @@ eeh_supported() {
grep -q 'EEH Subsystem is enabled' /proc/powerpc/eeh
}
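+# Common preamble for the EEH tests: exit with the kselftest skip code if EEH
+# isn't supported or the debugfs test files are missing, then raise the max
+# freeze count so repeated breakage doesn't trip it.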
+eeh_test_prep() {
+ if ! eeh_supported ; then
+ echo "EEH not supported on this system, skipping"
+ exit $KSELFTESTS_SKIP;
+ fi
+
+ if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
+ [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
+ log "debugfs EEH testing files are missing. Is debugfs mounted?"
+ exit $KSELFTESTS_SKIP;
+ fi
+
+ # Bump the max freeze count to something absurd so we don't
+ # trip over it while breaking things.
+ echo 5000 > /sys/kernel/debug/powerpc/eeh_max_freezes
+}
+
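+# Check whether it's safe and useful to inject an EEH error into the given
+# device ($1). Logs the reason and returns 1 if the device should be skipped.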
+eeh_can_break() {
+ local dev="$1"
+
+ # skip bridges since we can't recover them (yet...)
+ if [ -e "/sys/bus/pci/devices/$dev/pci_bus" ] ; then
+ log "$dev, Skipped: bridge"
+ return 1;
+ fi
+
+ # The ahci driver doesn't support error recovery. If the ahci device
+ # happens to be hosting the root filesystem and we go and break it,
+ # the system will generally go down. We should probably fix that at
+ # some point.
+ if [ "ahci" = "$(basename $(realpath /sys/bus/pci/devices/$dev/driver))" ] ; then
+ log "$dev, Skipped: ahci doesn't support recovery"
+ return 1;
+ fi
+
+ # Don't inject errors into an already-frozen PE. This happens with
+ # PEs that contain multiple PCI devices (e.g. multi-function cards)
+ # and injecting new errors during the recovery process will probably
+ # result in the recovery failing and the device being marked as
+ # failed.
+ if ! pe_ok $dev ; then
+ log "$dev, Skipped: Bad initial PE state"
+ return 1;
+ fi
+
+ return 0
+}
+
eeh_one_dev() {
local dev="$1"
@@ -46,7 +98,7 @@ eeh_one_dev() {
# testing so check that the argument is a well-formed sysfs device
# name.
if ! test -e /sys/bus/pci/devices/$dev/ ; then
- echo "Error: '$dev' must be a sysfs device name (DDDD:BB:DD.F)"
+ log "Error: '$dev' must be a sysfs device name (DDDD:BB:DD.F)"
return 1;
fi
@@ -70,16 +122,124 @@ eeh_one_dev() {
if pe_ok $dev ; then
break;
fi
- echo "$dev, waited $i/${max_wait}"
+ log "$dev, waited $i/${max_wait}"
sleep 1
done
if ! pe_ok $dev ; then
- echo "$dev, Failed to recover!"
+ log "$dev, Failed to recover!"
return 1;
fi
- echo "$dev, Recovered after $i seconds"
+ log "$dev, Recovered after $i seconds"
return 0;
}
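+# Check whether a driver is currently bound to the given PCI device.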
+eeh_has_driver() {
+ test -e /sys/bus/pci/devices/$1/driver;
+ return $?
+}
+
+eeh_can_recover() {
+ # we'll get an IO error if the device's current driver doesn't support
+ # error recovery
+ echo $1 > '/sys/kernel/debug/powerpc/eeh_dev_can_recover' 2>/dev/null
+
+ return $?
+}
+
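+# Print the SR-IOV physical functions that look usable for testing (driver
+# bound, platform support present). Returns 1 if none are found.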
+eeh_find_all_pfs() {
+ devices=""
+
+ # SR-IOV on pseries requires hypervisor support, so check for that
+ is_pseries=""
+ if grep -q pSeries /proc/cpuinfo ; then
+ if [ ! -f /proc/device-tree/rtas/ibm,open-sriov-allow-unfreeze ] ||
+ [ ! -f /proc/device-tree/rtas/ibm,open-sriov-map-pe-number ] ; then
+ return 1;
+ fi
+
+ is_pseries="true"
+ fi
+
+ for dev in `ls -1 /sys/bus/pci/devices/` ; do
+ sysfs="/sys/bus/pci/devices/$dev"
+ if [ ! -e "$sysfs/sriov_numvfs" ] ; then
+ continue
+ fi
+
+ # skip unsupported PFs on pseries
+ if [ -z "$is_pseries" ] &&
+ [ ! -f "$sysfs/of_node/ibm,is-open-sriov-pf" ] &&
+ [ ! -f "$sysfs/of_node/ibm,open-sriov-vf-bar-info" ] ; then
+ continue;
+ fi
+
+ # no driver, no vfs
+ if ! eeh_has_driver $dev ; then
+ continue
+ fi
+
+ devices="$devices $dev"
+ done
+
+ if [ -z "$devices" ] ; then
+ return 1;
+ fi
+
+ echo $devices
+ return 0;
+}
+
+# Attempts to enable one VF on each PF so we can do VF-specific tests.
+# stdout: list of enabled VFs, one per line
+# return code: 0 if vfs are found, 1 otherwise
+eeh_enable_vfs() {
+ pf_list="$(eeh_find_all_pfs)"
+
+ vfs=0
+ for dev in $pf_list ; do
+ pf_sysfs="/sys/bus/pci/devices/$dev"
+
+ # make sure we have a single VF
+ echo 0 > "$pf_sysfs/sriov_numvfs"
+ echo 1 > "$pf_sysfs/sriov_numvfs"
+ if [ "$?" != 0 ] ; then
+ log "Unable to enable VFs on $pf, skipping"
+ continue;
+ fi
+
+ vf="$(basename $(realpath "$pf_sysfs/virtfn0"))"
+ if [ $? != 0 ] ; then
+ log "unable to find enabled vf on $pf"
+ echo 0 > "$pf_sysfs/sriov_numvfs"
+ continue;
+ fi
+
+ if ! eeh_can_break $vf ; then
+ log "skipping "
+
+ echo 0 > "$pf_sysfs/sriov_numvfs"
+ continue;
+ fi
+
+ vfs="$((vfs + 1))"
+ echo $vf
+ done
+
+ test "$vfs" != 0
+ return $?
+}
+
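+# Disable the VFs on every PF found by eeh_find_all_pfs().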
+eeh_disable_vfs() {
+ pf_list="$(eeh_find_all_pfs)"
+ if [ -z "$pf_list" ] ; then
+ return 1;
+ fi
+
+ for dev in $pf_list ; do
+ echo 0 > "/sys/bus/pci/devices/$dev/sriov_numvfs"
+ done
+
+ return 0;
+}
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh b/tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh
new file mode 100755
index 000000000000..874c11953bb6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+. ./eeh-functions.sh
+
+eeh_test_prep # NB: may exit
+
+vf_list="$(eeh_enable_vfs)";
+if [ $? -ne 0 ] ; then
+ log "No usable VFs found. Skipping EEH aware VF test"
+ exit $KSELFTESTS_SKIP;
+fi
+
+log "Enabled VFs: $vf_list"
+
+tested=0
+passed=0
+for vf in $vf_list ; do
+ log "Testing $vf"
+
+ if ! eeh_can_recover $vf ; then
+ log "Driver for $vf doesn't support error recovery, skipping"
+ continue;
+ fi
+
+ tested="$((tested + 1))"
+
+ log "Breaking $vf..."
+ if ! eeh_one_dev $vf ; then
+ log "$vf failed to recover"
+ continue;
+ fi
+
+ passed="$((passed + 1))"
+done
+
+eeh_disable_vfs
+
+if [ "$tested" == 0 ] ; then
+ echo "No VFs with EEH aware drivers found, skipping"
+ exit $KSELFTESTS_SKIP
+fi
+
+test "$failed" != 0
+exit $?;
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh b/tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh
new file mode 100755
index 000000000000..8a4c147b9d43
--- /dev/null
+++ b/tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+. ./eeh-functions.sh
+
+eeh_test_prep # NB: may exit
+
+vf_list="$(eeh_enable_vfs)";
+if [ $? -ne 0 ] ; then
+ log "No usable VFs found. Skipping EEH unaware VF test"
+ exit $KSELFTESTS_SKIP;
+fi
+
+log "Enabled VFs: $vf_list"
+
+failed=0
+for vf in $vf_list ; do
+ log "Testing $vf"
+
+ if eeh_can_recover $vf ; then
+ log "Driver for $vf supports error recovery. Unbinding..."
+ echo "$vf" > /sys/bus/pci/devices/$vf/driver/unbind
+ fi
+
+ log "Breaking $vf..."
+ if ! eeh_one_dev $vf ; then
+ log "$vf failed to recover"
+ failed="$((failed + 1))"
+ fi
+done
+
+eeh_disable_vfs
+
+test "$failed" != 0
+exit $?;
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 26c72f2b61b1..98c3b647f54d 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -315,7 +315,7 @@ TEST(kcmp)
ret = __filecmp(getpid(), getpid(), 1, 1);
EXPECT_EQ(ret, 0);
if (ret != 0 && errno == ENOSYS)
- SKIP(return, "Kernel does not support kcmp() (missing CONFIG_CHECKPOINT_RESTORE?)");
+ SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)");
}
TEST(mode_strict_support)
@@ -4019,18 +4019,14 @@ TEST(user_notification_addfd)
/* Verify we can set an arbitrary remote fd */
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
- /*
- * The child has fds 0(stdin), 1(stdout), 2(stderr), 3(memfd),
- * 4(listener), so the newly allocated fd should be 5.
- */
- EXPECT_EQ(fd, 5);
+ EXPECT_GE(fd, 0);
EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
/* Verify we can set an arbitrary remote fd with large size */
memset(&big, 0x0, sizeof(big));
big.addfd = addfd;
fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
- EXPECT_EQ(fd, 6);
+ EXPECT_GE(fd, 0);
/* Verify we can set a specific remote fd */
addfd.newfd = 42;
diff --git a/tools/testing/selftests/timens/.gitignore b/tools/testing/selftests/timens/.gitignore
index 2e43851b47c1..fe1eb8271b35 100644
--- a/tools/testing/selftests/timens/.gitignore
+++ b/tools/testing/selftests/timens/.gitignore
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
clock_nanosleep
exec
+futex
gettime_perf
gettime_perf_cold
procfs
diff --git a/tools/testing/selftests/vDSO/vdso_config.h b/tools/testing/selftests/vDSO/vdso_config.h
index 6a6fe8d4ff55..6188b16827d1 100644
--- a/tools/testing/selftests/vDSO/vdso_config.h
+++ b/tools/testing/selftests/vDSO/vdso_config.h
@@ -47,10 +47,12 @@
#elif defined(__x86_64__)
#define VDSO_VERSION 0
#define VDSO_NAMES 1
-#elif defined(__riscv__)
+#elif defined(__riscv__) || defined(__riscv)
#define VDSO_VERSION 5
#define VDSO_NAMES 1
+#if __riscv_xlen == 32
#define VDSO_32BIT 1
+#endif
#else /* nds32 */
#define VDSO_VERSION 4
#define VDSO_NAMES 1
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 74c69b75f6f5..7ed7cd95e58f 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -39,7 +39,7 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; }
ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; }
ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; }
sleep() { read -t "$1" -N 1 || true; }
-waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
+waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; }
waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; }
@@ -141,6 +141,19 @@ tests() {
n2 iperf3 -s -1 -B fd00::2 &
waitiperf $netns2 $!
n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2
+
+ # TCP over IPv4, in parallel
+ for max in 4 5 50; do
+ local pids=( )
+ for ((i=0; i < max; ++i)) do
+ n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 &
+ pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i ))
+ done
+ for ((i=0; i < max; ++i)) do
+ n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 &
+ done
+ wait "${pids[@]}"
+ done
}
[[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}"
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 1aef72df20a1..3a29346e1452 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -607,7 +607,7 @@ static void do_multicpu_tests(void)
failures++;
asm volatile ("mov %0, %%ss" : : "rm" (orig_ss));
- };
+ }
ftx = 100; /* Kill the thread. */
syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);