// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(u32 level)
{
	/*
	 * Reject invalid block mappings and don't bother with 4TB mappings for
	 * 52-bit PAs.
	 */
	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
}

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
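
/*
 * Example: wiring up a minimal &struct kvm_pgtable_mm_ops (editor's
 * illustrative sketch, not copied from an in-tree user). Only the callback
 * signatures are dictated by this header; every my_* helper below is a
 * hypothetical stand-in for the caller's allocator, refcounting and
 * cache-maintenance primitives.
 *
 *	static struct kvm_pgtable_mm_ops my_mm_ops = {
 *		.zalloc_page		= my_zalloc_page,
 *		.zalloc_pages_exact	= my_zalloc_pages_exact,
 *		.free_pages_exact	= my_free_pages_exact,
 *		.get_page		= my_get_page,
 *		.put_page		= my_put_page,
 *		.page_count		= my_page_count,
 *		.phys_to_virt		= my_phys_to_virt,
 *		.virt_to_phys		= my_virt_to_phys,
 *		.dcache_clean_inval_poc	= my_dcache_clean_inval_poc,
 *		.icache_inval_pou	= my_icache_inval_pou,
 *	};
 */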

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB	= BIT(0),
	KVM_PGTABLE_S2_IDMAP	= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X	= BIT(0),
	KVM_PGTABLE_PROT_W	= BIT(1),
	KVM_PGTABLE_PROT_R	= BIT(2),

	KVM_PGTABLE_PROT_DEVICE	= BIT(3),

	KVM_PGTABLE_PROT_SW0	= BIT(55),
	KVM_PGTABLE_PROT_SW1	= BIT(56),
	KVM_PGTABLE_PROT_SW2	= BIT(57),
	KVM_PGTABLE_PROT_SW3	= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pte_t				*pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF		= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE	= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST	= BIT(2),
};

typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level,
					kvm_pte_t *ptep,
					enum kvm_pgtable_walk_flags flag,
					void * const arg);

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
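
/*
 * Example: a walker that counts valid leaf entries (editor's illustrative
 * sketch, not an in-tree walker). The callback signature,
 * KVM_PGTABLE_WALK_LEAF and kvm_pte_valid() come from this header; the
 * function and variable names are hypothetical. A walker like this is
 * handed to kvm_pgtable_walk(), declared further down.
 *
 *	static int count_valid_cb(u64 addr, u64 end, u32 level,
 *				  kvm_pte_t *ptep,
 *				  enum kvm_pgtable_walk_flags flag,
 *				  void * const arg)
 *	{
 *		u64 *count = arg;
 *
 *		if (kvm_pte_valid(*ptep))
 *			(*count)++;
 *
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker count_walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 */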

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
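
/*
 * Example: creating, using and tearing down an EL2 stage-1 mapping
 * (editor's illustrative sketch). hyp_va_bits, va, obj and my_mm_ops are
 * hypothetical; the call sequence follows the kernel-doc above.
 *
 *	struct kvm_pgtable pgt;
 *	int ret;
 *
 *	ret = kvm_pgtable_hyp_init(&pgt, hyp_va_bits, &my_mm_ops);
 *	if (ret)
 *		return ret;
 *
 *	ret = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, virt_to_phys(obj),
 *				  PAGE_HYP);
 *	...
 *	kvm_pgtable_hyp_unmap(&pgt, va, PAGE_SIZE);
 *	kvm_pgtable_hyp_destroy(&pgt);
 */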

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value.
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it tries to recreate the exact same mapping or only to change the access
 * permissions. Instead, the vCPU will exit from the guest once more, if still
 * needed, and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc);
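
/*
 * Example: installing a single page in a guest stage-2 (editor's
 * illustrative sketch). pgt, ipa, pfn and memcache are hypothetical;
 * @mc is simply forwarded to the mm_ops' zalloc_page() callback whenever
 * a new page-table page is needed.
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE,
 *				     pfn << PAGE_SHIFT,
 *				     KVM_PGTABLE_PROT_RWX, memcache);
 */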

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space
 *				    to track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
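
/*
 * Example: write-protecting a range for dirty logging (editor's
 * illustrative sketch; pgt, kvm, gfn and nr_pages are hypothetical).
 * Since kvm_pgtable_stage2_wrprotect() performs no TLB maintenance, the
 * caller must invalidate afterwards, e.g. with kvm_flush_remote_tlbs().
 *
 *	ret = kvm_pgtable_stage2_wrprotect(pgt, gfn << PAGE_SHIFT,
 *					   nr_pages << PAGE_SHIFT);
 *	if (!ret)
 *		kvm_flush_remote_tlbs(kvm);
 */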

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * clear the access flag in that entry.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: The old page-table entry prior to clearing the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
 *				   access flag set.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * Return: True if the page-table entry has the access flag set, false otherwise.
 */
bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. Leaf entries are reloaded
 * after invoking the walker callback, allowing the walker to descend into
 * a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
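
/*
 * Example: running the leaf-counting walker sketched earlier over the
 * first 1GiB of input addresses (editor's illustrative sketch; pgt and
 * count_walker are the hypothetical objects from the previous examples).
 *
 *	ret = kvm_pgtable_walk(pgt, 0, SZ_1G, &count_walker);
 */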

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level);

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry.
 *
 * Return: Protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry.
 *
 * Return: Protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

#endif	/* __ARM64_KVM_PGTABLE_H__ */