test_kasan.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_status test_status;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	test_status.report_found = false;
	test_status.sync_fault = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_status", &test_status);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, test_status.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_status". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
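 *
 * Tests below typically wrap a single bad access in this macro, for example:
 * KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]).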
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				"\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    READ_ONCE(test_status.sync_fault))			\
			kasan_enable_tagging();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
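 * To make the fallback trigger reliably, the tests below request
 * KMALLOC_MAX_CACHE_SIZE + 10 bytes.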
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

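/*
 * Check that krealloc() repoisons memory when an object grows or shrinks:
 * offsets below the new size must stay accessible, while offsets at and past
 * it must trigger a report.
 */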
static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

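/*
 * Check that an out-of-bounds 16-byte write (a whole-struct assignment) is
 * detected when the destination object is allocated 3 bytes too short.
 */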
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

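/*
 * Check that a use-after-free through the first pointer is still reported
 * after the freed slot may have been reallocated as the second allocation.
 */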
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

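/*
 * The two tests below check that kfree() does not produce false positives
 * when the pointer is recovered via a page_address() or phys_to_virt()
 * round trip.
 */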
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

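/* Check that freeing the same kmem_cache object twice is reported. */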
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

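/*
 * Check that the instrumented mem*() and str*() helpers catch bad accesses:
 * kasan_memchr() and kasan_memcmp() read one byte past the allocation, while
 * kasan_strings() operates on a freed object.
 */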
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

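/* Check that calling kfree_sensitive() twice on the same object is reported. */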
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return;

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");