grumain.c (25644B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
        .name = "gru"
};

static struct device gru_device = {
        .init_name = "",
        .driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *      ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
        return uv_blade_processor_id() % GRU_NUM_TFM;
#else
        int cpu = smp_processor_id();
        int id, core;

        core = uv_cpu_core_number(cpu);
        id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
        return id;
#endif
}

/*--------- ASID Management -------------------------------------------
 *
 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 * Once MAX is reached, flush the TLB & start over. However,
 * some asids may still be in use. There won't be many (percentage wise) still
 * in use. Search active contexts & determine the value of the first
 * asid in use ("x"s below). Set "limit" to this value.
 * This defines a block of assignable asids.
 *
 * When "limit" is reached, search forward from limit+1 and determine the
 * next block of assignable asids.
 *
 * Repeat until MAX_ASID is reached, then start over again.
 *
 * Each time MAX_ASID is reached, increment the asid generation. Since
 * the search for in-use asids only checks contexts with GRUs currently
 * assigned, asids in some contexts will be missed. Prior to loading
 * a context, the asid generation of the GTS asid is rechecked. If it
 * doesn't match the current generation, a new asid will be assigned.
 *
 *      0---------------x------------x---------------------x----|
 *        ^-next        ^-limit                                  ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
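/*
 * Editor's note: an illustrative walk-through of the scheme above, using
 * hypothetical asid values (not taken from the driver). If the in-use asids
 * found by scanning the active contexts are 0x40 and 0x90, then starting
 * from asid 0x41 the next in-use value above it is 0x90, so the assignable
 * block becomes [0x41, 0x90) and gs_asid_limit is set to 0x90. When
 * allocation reaches the limit, the search repeats from there; once
 * MAX_ASID is passed, the generation counter is bumped and assignment
 * restarts at MIN_ASID.
 */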
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
        gru_dbg(grudev, "gid %d\n", gru->gs_gid);
        STAT(asid_wrap);
        gru->gs_asid_gen++;
        return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
        int i, gid, inuse_asid, limit;

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        STAT(asid_next);
        limit = MAX_ASID;
        if (asid >= limit)
                asid = gru_wrap_asid(gru);
        gru_flush_all_tlb(gru);
        gid = gru->gs_gid;
again:
        for (i = 0; i < GRU_NUM_CCH; i++) {
                if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
                        continue;
                inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
                gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
                        gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
                        inuse_asid, i);
                if (inuse_asid == asid) {
                        asid += ASID_INC;
                        if (asid >= limit) {
                                /*
                                 * empty range: reset the range limit and
                                 * start over
                                 */
                                limit = MAX_ASID;
                                if (asid >= MAX_ASID)
                                        asid = gru_wrap_asid(gru);
                                goto again;
                        }
                }

                if ((inuse_asid > asid) && (inuse_asid < limit))
                        limit = inuse_asid;
        }
        gru->gs_asid_limit = limit;
        gru->gs_asid = asid;
        gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
                asid, limit);
        return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
        int asid;

        gru->gs_asid += ASID_INC;
        asid = gru->gs_asid;
        if (asid >= gru->gs_asid_limit)
                asid = gru_reset_asid_limit(gru, asid);

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
                                       char *idx)
{
        unsigned long bits = 0;
        int i;

        while (n--) {
                i = find_first_bit(p, mmax);
                if (i == mmax)
                        BUG();
                __clear_bit(i, p);
                __set_bit(i, &bits);
                if (idx)
                        *idx++ = i;
        }
        return bits;
}

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
                                       char *cbmap)
{
        return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
                                 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
                                       char *dsmap)
{
        return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
                                 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        gru->gs_active_contexts++;
        gts->ts_cbr_map =
            gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
                                     gts->ts_cbr_idx);
        gts->ts_dsr_map =
            gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        gru->gs_active_contexts--;
        gru->gs_cbr_map |= gts->ts_cbr_map;
        gru->gs_dsr_map |= gts->ts_dsr_map;
}
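/*
 * Editor's note: a small worked example of the bitmap helpers above, with
 * hypothetical values. If *p == 0b10110 and n == 2, reserve_resources()
 * clears bits 1 and 2 in *p (leaving 0b10000), returns 0b00110, and, when
 * idx is non-NULL, stores {1, 2} in it. free_gru_resources() later ORs the
 * saved masks back into gs_cbr_map/gs_dsr_map to release the allocation
 * units.
 */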
/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
                               int dsr_au_count, int max_active_contexts)
{
        return hweight64(gru->gs_cbr_map) >= cbr_au_count
                && hweight64(gru->gs_dsr_map) >= dsr_au_count
                && gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
        unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
        int asid;

        spin_lock(&gms->ms_asid_lock);
        asid = asids->mt_asid;

        spin_lock(&gru->gs_asid_lock);
        if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
                          gru->gs_asid_gen)) {
                asid = gru_assign_asid(gru);
                asids->mt_asid = asid;
                asids->mt_asid_gen = gru->gs_asid_gen;
                STAT(asid_new);
        } else {
                STAT(asid_reuse);
        }
        spin_unlock(&gru->gs_asid_lock);

        BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
        asids->mt_ctxbitmap |= ctxbitmap;
        if (!test_bit(gru->gs_gid, gms->ms_asidmap))
                __set_bit(gru->gs_gid, gms->ms_asidmap);
        spin_unlock(&gms->ms_asid_lock);

        gru_dbg(grudev,
                "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
                gms->ms_asidmap[0]);
        return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids;
        unsigned short ctxbitmap;

        asids = &gms->ms_asids[gru->gs_gid];
        ctxbitmap = (1 << gts->ts_ctxnum);
        spin_lock(&gms->ms_asid_lock);
        spin_lock(&gru->gs_asid_lock);
        BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
        asids->mt_ctxbitmap ^= ctxbitmap;
        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
        spin_unlock(&gru->gs_asid_lock);
        spin_unlock(&gms->ms_asid_lock);
}
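/*
 * Editor's note: an illustrative example of the tracker state above, with
 * hypothetical values. If a GSEG is loaded into context number 3 of the
 * GRU with gid 1, ctxbitmap is (1 << 3) == 0x8, so bit 3 is set in
 * gms->ms_asids[1].mt_ctxbitmap and bit 1 is set in gms->ms_asidmap.
 * Unloading the context clears that ctxbitmap bit again, and both updates
 * are made while holding ms_asid_lock and then gs_asid_lock, in that order.
 */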
/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
        if (gts && refcount_dec_and_test(&gts->ts_refcnt)) {
                if (gts->ts_gms)
                        gru_drop_mmu_notifier(gts->ts_gms);
                kfree(gts);
                STAT(gts_free);
        }
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
                                                            *vdata, int tsid)
{
        struct gru_thread_state *gts;

        list_for_each_entry(gts, &vdata->vd_head, ts_next)
                if (gts->ts_tsid == tsid)
                        return gts;
        return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
                int cbr_au_count, int dsr_au_count,
                unsigned char tlb_preload_count, int options, int tsid)
{
        struct gru_thread_state *gts;
        struct gru_mm_struct *gms;
        int bytes;

        bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
        bytes += sizeof(struct gru_thread_state);
        gts = kmalloc(bytes, GFP_KERNEL);
        if (!gts)
                return ERR_PTR(-ENOMEM);

        STAT(gts_alloc);
        memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
        refcount_set(&gts->ts_refcnt, 1);
        mutex_init(&gts->ts_ctxlock);
        gts->ts_cbr_au_count = cbr_au_count;
        gts->ts_dsr_au_count = dsr_au_count;
        gts->ts_tlb_preload_count = tlb_preload_count;
        gts->ts_user_options = options;
        gts->ts_user_blade_id = -1;
        gts->ts_user_chiplet_id = -1;
        gts->ts_tsid = tsid;
        gts->ts_ctxnum = NULLCTX;
        gts->ts_tlb_int_select = -1;
        gts->ts_cch_req_slice = -1;
        gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
        if (vma) {
                gts->ts_mm = current->mm;
                gts->ts_vma = vma;
                gms = gru_register_mmu_notifier();
                if (IS_ERR(gms))
                        goto err;
                gts->ts_gms = gms;
        }

        gru_dbg(grudev, "alloc gts %p\n", gts);
        return gts;

err:
        gts_drop(gts);
        return ERR_CAST(gms);
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
        struct gru_vma_data *vdata = NULL;

        vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
        if (!vdata)
                return NULL;

        STAT(vdata_alloc);
        INIT_LIST_HEAD(&vdata->vd_head);
        spin_lock_init(&vdata->vd_lock);
        gru_dbg(grudev, "alloc vdata %p\n", vdata);
        return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
                                               int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts;

        spin_lock(&vdata->vd_lock);
        gts = gru_find_current_gts_nolock(vdata, tsid);
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that a race may allow
 * another thread to create a gts for the same tsid first.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
                                                int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts, *ngts;

        gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
                            vdata->vd_dsr_au_count,
                            vdata->vd_tlb_preload_count,
                            vdata->vd_user_options, tsid);
        if (IS_ERR(gts))
                return gts;

        spin_lock(&vdata->vd_lock);
        ngts = gru_find_current_gts_nolock(vdata, tsid);
        if (ngts) {
                gts_drop(gts);
                gts = ngts;
                STAT(gts_double_allocate);
        } else {
                list_add(&gts->ts_next, &vdata->vd_head);
        }
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}
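/*
 * Editor's note on the race handled above: if two threads fault on the same
 * GSEG concurrently, both may call gru_alloc_thread_state() with the same
 * tsid. The loser finds the winner's gts already on vd_head while holding
 * vd_lock, drops its freshly allocated copy via gts_drop(), and returns the
 * existing gts; gts_double_allocate counts how often this happens.
 */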
/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
        struct gru_state *gru;

        gru = gts->ts_gru;
        gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

        spin_lock(&gru->gs_lock);
        gru->gs_gts[gts->ts_ctxnum] = NULL;
        free_gru_resources(gru, gts);
        BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
        __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
        gts->ts_ctxnum = NULLCTX;
        gts->ts_gru = NULL;
        gts->ts_blade = -1;
        spin_unlock(&gru->gs_lock);

        gts_drop(gts);
        STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
        while (num-- > 0) {
                prefetchw(p);
                p += stride;
        }
}

static inline long gru_copy_handle(void *d, void *s)
{
        memcpy(d, s, GRU_HANDLE_BYTES);
        return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
                                 unsigned long cbrmap, unsigned long length)
{
        int i, scr;

        prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
                      GRU_CACHE_LINE_BYTES);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
                prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
                              GRU_CACHE_LINE_BYTES);
                cb += GRU_HANDLE_STRIDE;
        }
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
                                  unsigned long cbrmap, unsigned long dsrmap,
                                  int data_valid)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                if (data_valid) {
                        save += gru_copy_handle(cb, save);
                        save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
                                                save);
                } else {
                        memset(cb, 0, GRU_CACHE_LINE_BYTES);
                        memset(cbe + i * GRU_HANDLE_STRIDE, 0,
                               GRU_CACHE_LINE_BYTES);
                }
                /* Flush CBE to hide race in context restart */
                mb();
                gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
                cb += GRU_HANDLE_STRIDE;
        }

        if (data_valid)
                memcpy(gseg + GRU_DS_BASE, save, length);
        else
                memset(gseg + GRU_DS_BASE, 0, length);
}
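/*
 * Editor's note: the save area used by gru_load_context_data() above and
 * gru_unload_context_data() below appears to be laid out as, for each CBR
 * in cbrmap, the CB handle followed by its CBE (GRU_HANDLE_BYTES each),
 * and then the data segment of hweight64(dsrmap) * GRU_DSR_AU_BYTES bytes,
 * consistent with the DSR_BYTES() + CBR_BYTES() sizing in gru_alloc_gts().
 */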
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
                                    unsigned long cbrmap, unsigned long dsrmap)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

        /* CBEs may not be coherent. Flush them from cache */
        for_each_cbr_in_allocation_map(i, &cbrmap, scr)
                gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
        mb();           /* Let the CL flush complete */

        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                save += gru_copy_handle(save, cb);
                save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
                cb += GRU_HANDLE_STRIDE;
        }
        memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int ctxnum = gts->ts_ctxnum;

        if (!is_kernel_context(gts))
                zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
                gts, gts->ts_cbr_map, gts->ts_dsr_map);
        lock_cch_handle(cch);
        if (cch_interrupt_sync(cch))
                BUG();

        if (!is_kernel_context(gts))
                gru_unload_mm_tracker(gru, gts);
        if (savestate) {
                gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
                                        ctxnum, gts->ts_cbr_map,
                                        gts->ts_dsr_map);
                gts->ts_data_valid = 1;
        }

        if (cch_deallocate(cch))
                BUG();
        unlock_cch_handle(cch);

        gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int i, err, asid, ctxnum = gts->ts_ctxnum;

        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
        lock_cch_handle(cch);
        cch->tfm_fault_bit_enable =
                (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
                 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        if (cch->tlb_int_enable) {
                gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                cch->tlb_int_select = gts->ts_tlb_int_select;
        }
        if (gts->ts_cch_req_slice >= 0) {
                cch->req_slice_set_enable = 1;
                cch->req_slice = gts->ts_cch_req_slice;
        } else {
                cch->req_slice_set_enable = 0;
        }
        cch->tfm_done_bit_enable = 0;
        cch->dsr_allocation_map = gts->ts_dsr_map;
        cch->cbr_allocation_map = gts->ts_cbr_map;

        if (is_kernel_context(gts)) {
                cch->unmap_enable = 1;
                cch->tfm_done_bit_enable = 1;
                cch->cb_int_enable = 1;
                cch->tlb_int_select = 0;        /* For now, ints go to cpu 0 */
        } else {
                cch->unmap_enable = 0;
                cch->tfm_done_bit_enable = 0;
                cch->cb_int_enable = 0;
                asid = gru_load_mm_tracker(gru, gts);
                for (i = 0; i < 8; i++) {
                        cch->asid[i] = asid + i;
                        cch->sizeavail[i] = gts->ts_sizeavail;
                }
        }

        err = cch_allocate(cch);
        if (err) {
                gru_dbg(grudev,
                        "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
                        err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
                BUG();
        }

        gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
                              gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

        if (cch_start(cch))
                BUG();
        unlock_cch_handle(cch);

        gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
                gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
                (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
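/*
 * Editor's note on the CCH programming above: GRU_OPT_MISS_FMM_POLL and
 * GRU_OPT_MISS_FMM_INTR both set tfm_fault_bit_enable, so TLB misses are
 * flagged in the TLB fault map; only GRU_OPT_MISS_FMM_INTR also sets
 * tlb_int_enable and picks an interrupt target via gru_cpu_fault_map_id().
 * User contexts are additionally given a block of eight consecutive asids
 * (asid through asid + 7), as programmed in the loop above.
 */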
/*
 * Update fields in an active CCH:
 *      - retarget interrupts on local blade
 *      - update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
        struct gru_context_configuration_handle *cch;
        struct gru_state *gru = gts->ts_gru;
        int i, ctxnum = gts->ts_ctxnum, ret = 0;

        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        lock_cch_handle(cch);
        if (cch->state == CCHSTATE_ACTIVE) {
                if (gru->gs_gts[gts->ts_ctxnum] != gts)
                        goto exit;
                if (cch_interrupt(cch))
                        BUG();
                for (i = 0; i < 8; i++)
                        cch->sizeavail[i] = gts->ts_sizeavail;
                gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                cch->tlb_int_select = gru_cpu_fault_map_id();
                cch->tfm_fault_bit_enable =
                        (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
                         || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
                if (cch_start(cch))
                        BUG();
                ret = 1;
        }
exit:
        unlock_cch_handle(cch);
        return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *      - task's GRU context is loaded into a GRU
 *      - task is using interrupt notification for TLB faults
 *      - task has migrated to a different cpu on the same blade where
 *        it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
        if (gts->ts_tlb_int_select < 0
            || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
                return 0;

        gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
                gru_cpu_fault_map_id());
        return gru_update_cch(gts);
}

/*
 * Check if a GRU context is allowed to use a specific chiplet. By default
 * a context is assigned to any blade-local chiplet. However, users can
 * override this.
 *      Returns 1 if assignment allowed, 0 otherwise
 */
static int gru_check_chiplet_assignment(struct gru_state *gru,
                                        struct gru_thread_state *gts)
{
        int blade_id;
        int chiplet_id;

        blade_id = gts->ts_user_blade_id;
        if (blade_id < 0)
                blade_id = uv_numa_blade_id();

        chiplet_id = gts->ts_user_chiplet_id;
        return gru->gs_blade_id == blade_id &&
                (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
        struct gru_state *gru;

        /*
         * If the current task is the context owner, verify that the
         * context is correctly placed. This test is skipped for non-owner
         * references. Pthread apps use non-owner references to the CBRs.
         */
        gru = gts->ts_gru;
        if (!gru || gts->ts_tgid_owner != current->tgid)
                return;

        if (!gru_check_chiplet_assignment(gru, gts)) {
                STAT(check_context_unload);
                gru_unload_context(gts, 1);
        } else if (gru_retarget_intr(gts)) {
                STAT(check_context_retarget_intr);
        }
}
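/*
 * Editor's note: illustrative cases for gru_check_chiplet_assignment(),
 * with hypothetical ids. With ts_user_blade_id == -1 and
 * ts_user_chiplet_id == -1, any chiplet on the current cpu's blade
 * qualifies. With ts_user_blade_id == 2 and ts_user_chiplet_id == 1, only
 * the GRU whose gs_blade_id == 2 and gs_chiplet_id == 1 qualifies, and a
 * context found elsewhere is unloaded by gru_check_context_placement().
 */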
/*
 * Insufficient GRU resources available on the local blade. Steal a context from
 * a process. This is a hack until a _real_ resource scheduler is written....
 */
#define next_ctxnum(n)  ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)  (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
                                 ((g)+1) : &(b)->bs_grus[0])

static int is_gts_stealable(struct gru_thread_state *gts,
                            struct gru_blade_state *bs)
{
        if (is_kernel_context(gts))
                return down_write_trylock(&bs->bs_kgts_sema);
        else
                return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
                       struct gru_blade_state *bs)
{
        if (is_kernel_context(gts)) {
                up_write(&bs->bs_kgts_sema);
                STAT(steal_kernel_context);
        } else {
                mutex_unlock(&gts->ts_ctxlock);
                STAT(steal_user_context);
        }
}

void gru_steal_context(struct gru_thread_state *gts)
{
        struct gru_blade_state *blade;
        struct gru_state *gru, *gru0;
        struct gru_thread_state *ngts = NULL;
        int ctxnum, ctxnum0, flag = 0, cbr, dsr;
        int blade_id;

        blade_id = gts->ts_user_blade_id;
        if (blade_id < 0)
                blade_id = uv_numa_blade_id();
        cbr = gts->ts_cbr_au_count;
        dsr = gts->ts_dsr_au_count;

        blade = gru_base[blade_id];
        spin_lock(&blade->bs_lock);

        ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
        gru = blade->bs_lru_gru;
        if (ctxnum == 0)
                gru = next_gru(blade, gru);
        blade->bs_lru_gru = gru;
        blade->bs_lru_ctxnum = ctxnum;
        ctxnum0 = ctxnum;
        gru0 = gru;
        while (1) {
                if (gru_check_chiplet_assignment(gru, gts)) {
                        if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
                                break;
                        spin_lock(&gru->gs_lock);
                        for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
                                if (flag && gru == gru0 && ctxnum == ctxnum0)
                                        break;
                                ngts = gru->gs_gts[ctxnum];
                                /*
                                 * We are grabbing locks out of order, so trylock is
                                 * needed. GTSs are usually not locked, so the odds of
                                 * success are high. If trylock fails, try to steal a
                                 * different GSEG.
                                 */
                                if (ngts && is_gts_stealable(ngts, blade))
                                        break;
                                ngts = NULL;
                        }
                        spin_unlock(&gru->gs_lock);
                        if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
                                break;
                }
                if (flag && gru == gru0)
                        break;
                flag = 1;
                ctxnum = 0;
                gru = next_gru(blade, gru);
        }
        spin_unlock(&blade->bs_lock);

        if (ngts) {
                gts->ustats.context_stolen++;
                ngts->ts_steal_jiffies = jiffies;
                gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
                gts_stolen(ngts, blade);
        } else {
                STAT(steal_context_failed);
        }
        gru_dbg(grudev,
                "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
                " avail cb %ld, ds %ld\n",
                gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
                hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
        int ctxnum;

        ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
        __set_bit(ctxnum, &gru->gs_context_map);
        return ctxnum;
}
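/*
 * Editor's note: the rotation in gru_steal_context() above is a simple
 * round-robin victim search. next_ctxnum() advances the starting context
 * slot and wraps back to 0 once it reaches GRU_NUM_CCH - 2, next_gru()
 * cycles through the blade's chiplets, and bs_lru_gru/bs_lru_ctxnum
 * remember where the last attempt left off so successive callers do not
 * keep victimizing the same context.
 */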
/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
        struct gru_state *gru, *grux;
        int i, max_active_contexts;
        int blade_id = gts->ts_user_blade_id;

        if (blade_id < 0)
                blade_id = uv_numa_blade_id();
again:
        gru = NULL;
        max_active_contexts = GRU_NUM_CCH;
        for_each_gru_on_blade(grux, blade_id, i) {
                if (!gru_check_chiplet_assignment(grux, gts))
                        continue;
                if (check_gru_resources(grux, gts->ts_cbr_au_count,
                                        gts->ts_dsr_au_count,
                                        max_active_contexts)) {
                        gru = grux;
                        max_active_contexts = grux->gs_active_contexts;
                        if (max_active_contexts == 0)
                                break;
                }
        }

        if (gru) {
                spin_lock(&gru->gs_lock);
                if (!check_gru_resources(gru, gts->ts_cbr_au_count,
                                         gts->ts_dsr_au_count, GRU_NUM_CCH)) {
                        spin_unlock(&gru->gs_lock);
                        goto again;
                }
                reserve_gru_resources(gru, gts);
                gts->ts_gru = gru;
                gts->ts_blade = gru->gs_blade_id;
                gts->ts_ctxnum = gru_assign_context_number(gru);
                refcount_inc(&gts->ts_refcnt);
                gru->gs_gts[gts->ts_ctxnum] = gts;
                spin_unlock(&gru->gs_lock);

                STAT(assign_context);
                gru_dbg(grudev,
                        "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
                        gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
                        gts->ts_gru->gs_gid, gts->ts_ctxnum,
                        gts->ts_cbr_au_count, gts->ts_dsr_au_count);
        } else {
                gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
                STAT(assign_context_failed);
        }

        return gru;
}

/*
 * gru_nopage
 *
 * Map the user's GRU segment
 *
 *      Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
vm_fault_t gru_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct gru_thread_state *gts;
        unsigned long paddr, vaddr;
        unsigned long expires;

        vaddr = vmf->address;
        gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
                vma, vaddr, GSEG_BASE(vaddr));
        STAT(nopfn);

        /* The following check ensures vaddr is a valid address in the VMA */
        gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (!gts)
                return VM_FAULT_SIGBUS;

again:
        mutex_lock(&gts->ts_ctxlock);
        preempt_disable();

        gru_check_context_placement(gts);

        if (!gts->ts_gru) {
                STAT(load_user_context);
                if (!gru_assign_gru_context(gts)) {
                        preempt_enable();
                        mutex_unlock(&gts->ts_ctxlock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
                        expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY;
                        if (time_before(expires, jiffies))
                                gru_steal_context(gts);
                        goto again;
                }
                gru_load_context(gts);
                paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
                remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
                                paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
                                vma->vm_page_prot);
        }

        preempt_enable();
        mutex_unlock(&gts->ts_ctxlock);

        return VM_FAULT_NOPAGE;
}
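/*
 * Editor's note on the retry loop in gru_fault() above: if no GRU context
 * can be assigned, the faulting thread sleeps for GRU_ASSIGN_DELAY and,
 * once GRU_STEAL_DELAY jiffies have passed since this gts itself was last
 * stolen from (ts_steal_jiffies), calls gru_steal_context() before
 * retrying. On success the GSEG is mapped with remap_pfn_range() and the
 * fault returns VM_FAULT_NOPAGE.
 */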