1 /*
2 * Support for decoding of KVM_* ioctl commands.
3 *
4 * Copyright (c) 2017 Masatake YAMATO <yamato@redhat.com>
5 * Copyright (c) 2017 Red Hat, Inc.
6 * Copyright (c) 2017-2021 The strace developers.
7 * All rights reserved.
8 *
9 * SPDX-License-Identifier: LGPL-2.1-or-later
10 */
11
12 #include "defs.h"
13
14 #ifdef HAVE_LINUX_KVM_H
15 # include <linux/kvm.h>
16 # include "arch_kvm.c"
17 # include "xmalloc.h"
18 # include "mmap_cache.h"
19
/*
 * Per-tracee record tracking a vcpu file descriptor, used to locate the
 * struct kvm_run area the tracee mmap'ed for that vcpu.
 */
struct vcpu_info {
	struct vcpu_info *next;	/* singly-linked list rooted at tcp->vcpu_info_list */
	int fd;			/* vcpu fd in the tracee */
	int cpuid;		/* id passed to KVM_CREATE_VCPU */
	long mmap_addr;		/* tracee address of the kvm_run mapping */
	unsigned long mmap_len;	/* length of that mapping */
	bool resolved;		/* mmap_addr/mmap_len are known valid */
};

/* Set by kvm_run_structure_decoder_init(); enables kvm_run decoding on KVM_RUN. */
static bool dump_kvm_run_structure;
30
31 static struct vcpu_info *
32 vcpu_find(struct tcb *const tcp, int fd)
33 {
34 for (struct vcpu_info *vcpu_info = tcp->vcpu_info_list;
35 vcpu_info;
36 vcpu_info = vcpu_info->next)
37 if (vcpu_info->fd == fd)
38 return vcpu_info;
39
40 return NULL;
41 }
42
43 static struct vcpu_info *
44 vcpu_alloc(struct tcb *const tcp, int fd, int cpuid)
45 {
46 struct vcpu_info *vcpu_info = xzalloc(sizeof(*vcpu_info));
47
48 vcpu_info->fd = fd;
49 vcpu_info->cpuid = cpuid;
50
51 vcpu_info->next = tcp->vcpu_info_list;
52 tcp->vcpu_info_list = vcpu_info;
53
54 return vcpu_info;
55 }
56
57 void
58 kvm_vcpu_info_free(struct tcb *tcp)
59 {
60 struct vcpu_info *next;
61
62 for (struct vcpu_info *head = tcp->vcpu_info_list; head; head = next) {
63 next = head->next;
64 free(head);
65 }
66
67 tcp->vcpu_info_list = NULL;
68 }
69
70 static void
71 vcpu_register(struct tcb *const tcp, int fd, int cpuid)
72 {
73 if (fd < 0)
74 return;
75
76 struct vcpu_info *vcpu_info = vcpu_find(tcp, fd);
77
78 if (!vcpu_info) {
79 vcpu_alloc(tcp, fd, cpuid);
80 } else if (vcpu_info->cpuid != cpuid) {
81 vcpu_info->cpuid = cpuid;
82 vcpu_info->resolved = false;
83 }
84 }
85
86 static bool
87 is_map_for_file(struct mmap_cache_entry_t *map_info, void *data)
88 {
89 /* major version for anon inode may be given in get_anon_bdev()
90 * in linux kernel.
91 *
92 * *p = MKDEV(0, dev & MINORMASK);
93 *-----------------^
94 */
95 return map_info->binary_filename &&
96 map_info->major == 0 &&
97 strcmp(map_info->binary_filename, data) == 0;
98 }
99
100 static unsigned long
101 map_len(struct mmap_cache_entry_t *map_info)
102 {
103 return map_info->start_addr < map_info->end_addr
104 ? map_info->end_addr - map_info->start_addr
105 : 0;
106 }
107
/* dentry name a vcpu fd resolves to: "anon_inode:kvm-vcpu:<cpuid>". */
# define VCPU_DENTRY_PREFIX "anon_inode:kvm-vcpu:"

/*
 * Return the vcpu_info for fd with mmap_addr/mmap_len resolved, or NULL
 * if the kvm_run mapping cannot be located (mmap cache unavailable, fd
 * is not a vcpu fd, or the mapping has disappeared).
 */
static struct vcpu_info*
vcpu_get_info(struct tcb *const tcp, int fd)
{
	struct vcpu_info *vcpu_info = vcpu_find(tcp, fd);
	struct mmap_cache_entry_t *map_info;
	const char *cpuid_str;

	enum mmap_cache_rebuild_result mc_stat =
		mmap_cache_rebuild_if_invalid(tcp, __func__);
	if (mc_stat == MMAP_CACHE_REBUILD_NOCACHE)
		return NULL;

	if (vcpu_info && vcpu_info->resolved) {
		/* Cache untouched since resolution: trust the stored mapping. */
		if (mc_stat == MMAP_CACHE_REBUILD_READY)
			return vcpu_info;
		else {
			/* Cache rebuilt: verify the stored address still names
			 * this vcpu's anon inode before trusting it. */
			map_info = mmap_cache_search(tcp, vcpu_info->mmap_addr);
			if (map_info) {
				cpuid_str =
					STR_STRIP_PREFIX(map_info->binary_filename,
							 VCPU_DENTRY_PREFIX);
				if (cpuid_str != map_info->binary_filename) {
					int cpuid = string_to_uint(cpuid_str);
					if (cpuid < 0)
						return NULL;
					if (vcpu_info->cpuid == cpuid)
						return vcpu_info;
				}
			}

			/* The vcpu vma may be mremap'ed. */
			vcpu_info->resolved = false;
		}
	}

	/* Slow path: !vcpu_info || !vcpu_info->resolved */
	char path[PATH_MAX + 1];
	cpuid_str = path;
	if (getfdpath(tcp, fd, path, sizeof(path)) >= 0)
		cpuid_str = STR_STRIP_PREFIX(path, VCPU_DENTRY_PREFIX);
	if (cpuid_str == path)
		map_info = NULL;	/* fd path lacks the vcpu prefix */
	else
		map_info = mmap_cache_search_custom(tcp, is_map_for_file, path);

	if (map_info) {
		int cpuid = string_to_uint(cpuid_str);
		if (cpuid < 0)
			return NULL;
		if (!vcpu_info)
			vcpu_info = vcpu_alloc(tcp, fd, cpuid);
		else if (vcpu_info->cpuid != cpuid)
			vcpu_info->cpuid = cpuid;
		vcpu_info->mmap_addr = map_info->start_addr;
		vcpu_info->mmap_len = map_len(map_info);
		vcpu_info->resolved = true;
		return vcpu_info;
	}

	return NULL;
}
171
172 static int
173 kvm_ioctl_create_vcpu(struct tcb *const tcp, const kernel_ulong_t arg)
174 {
175 uint32_t cpuid = arg;
176
177 if (entering(tcp)) {
178 tprint_arg_next();
179 PRINT_VAL_U(cpuid);
180 if (dump_kvm_run_structure)
181 return 0;
182 } else if (!syserror(tcp)) {
183 vcpu_register(tcp, tcp->u_rval, cpuid);
184 }
185
186 return RVAL_IOCTL_DECODED | RVAL_FD;
187 }
188
189 # ifdef HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION
190 # include "xlat/kvm_mem_flags.h"
191 static int
192 kvm_ioctl_set_user_memory_region(struct tcb *const tcp, const kernel_ulong_t arg)
193 {
194 struct kvm_userspace_memory_region u_memory_region;
195
196 tprint_arg_next();
197 if (umove_or_printaddr(tcp, arg, &u_memory_region))
198 return RVAL_IOCTL_DECODED;
199
200 tprint_struct_begin();
201 PRINT_FIELD_U(u_memory_region, slot);
202 tprint_struct_next();
203 PRINT_FIELD_FLAGS(u_memory_region, flags, kvm_mem_flags, "KVM_MEM_???");
204 tprint_struct_next();
205 PRINT_FIELD_X(u_memory_region, guest_phys_addr);
206 tprint_struct_next();
207 PRINT_FIELD_U(u_memory_region, memory_size);
208 tprint_struct_next();
209 PRINT_FIELD_X(u_memory_region, userspace_addr);
210 tprint_struct_end();
211
212 return RVAL_IOCTL_DECODED;
213 }
214 # endif /* HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION */
215
216 # ifdef HAVE_STRUCT_KVM_REGS
217 static int
218 kvm_ioctl_decode_regs(struct tcb *const tcp, const unsigned int code,
219 const kernel_ulong_t arg)
220 {
221 struct kvm_regs regs;
222
223 if (code == KVM_GET_REGS && entering(tcp))
224 return 0;
225
226 tprint_arg_next();
227 if (!umove_or_printaddr(tcp, arg, ®s))
228 arch_print_kvm_regs(tcp, arg, ®s);
229
230 return RVAL_IOCTL_DECODED;
231 }
232 # endif /* HAVE_STRUCT_KVM_REGS */
233
234 # ifdef HAVE_STRUCT_KVM_CPUID2
235 # include "xlat/kvm_cpuid_flags.h"
236 static bool
237 print_kvm_cpuid_entry(struct tcb *const tcp,
238 void* elem_buf, size_t elem_size, void* data)
239 {
240 const struct kvm_cpuid_entry2 *entry = elem_buf;
241 tprint_struct_begin();
242 PRINT_FIELD_X(*entry, function);
243 tprint_struct_next();
244 PRINT_FIELD_X(*entry, index);
245 tprint_struct_next();
246 PRINT_FIELD_FLAGS(*entry, flags, kvm_cpuid_flags, "KVM_CPUID_FLAG_???");
247 tprint_struct_next();
248 PRINT_FIELD_X(*entry, eax);
249 tprint_struct_next();
250 PRINT_FIELD_X(*entry, ebx);
251 tprint_struct_next();
252 PRINT_FIELD_X(*entry, ecx);
253 tprint_struct_next();
254 PRINT_FIELD_X(*entry, edx);
255 tprint_struct_end();
256
257 return true;
258 }
259
/*
 * Decode struct kvm_cpuid2 ioctl arguments (KVM_SET_CPUID2,
 * KVM_GET_SUPPORTED_CPUID and, when available, KVM_GET_EMULATED_CPUID).
 * The GET variants are output ioctls, so decoding is deferred to exit.
 */
static int
kvm_ioctl_decode_cpuid2(struct tcb *const tcp, const unsigned int code,
			const kernel_ulong_t arg)
{
	struct kvm_cpuid2 cpuid;

	if (entering(tcp) && (code == KVM_GET_SUPPORTED_CPUID
# ifdef KVM_GET_EMULATED_CPUID
			      || code == KVM_GET_EMULATED_CPUID
# endif
			     ))
		return 0;

	tprint_arg_next();
	if (!umove_or_printaddr(tcp, arg, &cpuid)) {
		tprint_struct_begin();
		PRINT_FIELD_U(cpuid, nent);

		tprint_struct_next();
		tprints_field_name("entries");
		if (abbrev(tcp)) {
			/* Abbreviated mode: print "[...]" instead of entries. */
			tprint_array_begin();
			if (cpuid.nent)
				tprint_more_data_follows();
			tprint_array_end();

		} else {
			/* entries[] follows the fixed header in memory. */
			struct kvm_cpuid_entry2 entry;
			print_array(tcp, arg + sizeof(cpuid), cpuid.nent,
				    &entry, sizeof(entry), tfetch_mem,
				    print_kvm_cpuid_entry, NULL);
		}
		tprint_struct_end();
	}

	return RVAL_IOCTL_DECODED;
}
297 # endif /* HAVE_STRUCT_KVM_CPUID2 */
298
299 # ifdef HAVE_STRUCT_KVM_SREGS
300 static int
301 kvm_ioctl_decode_sregs(struct tcb *const tcp, const unsigned int code,
302 const kernel_ulong_t arg)
303 {
304 struct kvm_sregs sregs;
305
306 if (code == KVM_GET_SREGS && entering(tcp))
307 return 0;
308
309 tprint_arg_next();
310 if (!umove_or_printaddr(tcp, arg, &sregs))
311 arch_print_kvm_sregs(tcp, arg, &sregs);
312
313 return RVAL_IOCTL_DECODED;
314 }
315 # endif /* HAVE_STRUCT_KVM_SREGS */
316
317 # include "xlat/kvm_cap.h"
318 static int
319 kvm_ioctl_decode_check_extension(struct tcb *const tcp, const unsigned int code,
320 const kernel_ulong_t arg)
321 {
322 tprint_arg_next();
323 printxval64(kvm_cap, arg, "KVM_CAP_???");
324 return RVAL_IOCTL_DECODED;
325 }
326
327 # include "xlat/kvm_exit_reason.h"
328 static void
329 kvm_ioctl_run_attach_auxstr(struct tcb *const tcp,
330 struct vcpu_info *info)
331
332 {
333 static struct kvm_run vcpu_run_struct;
334
335 if (info->mmap_len < sizeof(vcpu_run_struct))
336 return;
337
338 if (umove(tcp, info->mmap_addr, &vcpu_run_struct) < 0)
339 return;
340
341 tcp->auxstr = xlookup(kvm_exit_reason, vcpu_run_struct.exit_reason);
342 if (!tcp->auxstr)
343 tcp->auxstr = "KVM_EXIT_???";
344 }
345
346 static int
347 kvm_ioctl_decode_run(struct tcb *const tcp)
348 {
349
350 if (entering(tcp))
351 return 0;
352
353 int r = RVAL_DECODED;
354
355 if (syserror(tcp))
356 return r;
357
358 if (dump_kvm_run_structure) {
359 tcp->auxstr = NULL;
360 int fd = tcp->u_arg[0];
361 struct vcpu_info *info = vcpu_get_info(tcp, fd);
362
363 if (info) {
364 kvm_ioctl_run_attach_auxstr(tcp, info);
365 if (tcp->auxstr)
366 r |= RVAL_STR;
367 }
368 }
369
370 return r;
371 }
372
/*
 * Entry point for KVM_* ioctl decoding, dispatched from the generic
 * ioctl decoder.  Returns RVAL_* flags describing how the command was
 * (or will be) decoded.
 */
int
kvm_ioctl(struct tcb *const tcp, const unsigned int code, const kernel_ulong_t arg)
{
	switch (code) {
	case KVM_CREATE_VCPU:
		return kvm_ioctl_create_vcpu(tcp, arg);

# ifdef HAVE_STRUCT_KVM_USERSPACE_MEMORY_REGION
	case KVM_SET_USER_MEMORY_REGION:
		return kvm_ioctl_set_user_memory_region(tcp, arg);
# endif

# ifdef HAVE_STRUCT_KVM_REGS
	case KVM_SET_REGS:
	case KVM_GET_REGS:
		return kvm_ioctl_decode_regs(tcp, code, arg);
# endif

# ifdef HAVE_STRUCT_KVM_SREGS
	case KVM_SET_SREGS:
	case KVM_GET_SREGS:
		return kvm_ioctl_decode_sregs(tcp, code, arg);
# endif

# ifdef HAVE_STRUCT_KVM_CPUID2
	case KVM_SET_CPUID2:
	case KVM_GET_SUPPORTED_CPUID:
# ifdef KVM_GET_EMULATED_CPUID
	case KVM_GET_EMULATED_CPUID:
# endif
		return kvm_ioctl_decode_cpuid2(tcp, code, arg);
# endif

	case KVM_CHECK_EXTENSION:
		return kvm_ioctl_decode_check_extension(tcp, code, arg);

	case KVM_CREATE_VM:
		/* Returns a VM fd; nothing else to decode. */
		return RVAL_DECODED | RVAL_FD;

	case KVM_RUN:
		return kvm_ioctl_decode_run(tcp);

	case KVM_GET_VCPU_MMAP_SIZE:
	case KVM_GET_API_VERSION:
	default:
		/* No argument worth decoding beyond the raw value. */
		return RVAL_DECODED;
	}
}
421
422 void
423 kvm_run_structure_decoder_init(void)
424 {
425 dump_kvm_run_structure = true;
426 mmap_cache_enable();
427 }
428
429 #endif /* HAVE_LINUX_KVM_H */