hw_queue.c (30785B)
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/*
 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. The result can range up to twice the queue length.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
	ptr += val;
	ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
	return ptr;
}

static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
{
	return atomic_read(ci) & ((queue_len << 1) - 1);
}

static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
	int delta = (q->pi - queue_ci_get(&q->ci, queue_len));

	if (delta >= 0)
		return (queue_len - delta);
	else
		return (abs(delta) - queue_len);
}

void hl_hw_queue_update_ci(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_hw_queue *q;
	int i;

	if (hdev->disabled)
		return;

	q = &hdev->kernel_queues[0];

	/* There are no internal queues if H/W queues are being used */
	if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
		return;

	/* We must increment CI for every queue that will never get a
	 * completion. There are two scenarios in which this can happen:
	 * 1. All queues of a non-completion CS will never get a completion.
	 * 2. Internal queues never get a completion.
	 */
	for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
		if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
			atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
	}
}

/*
 * hl_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
 *                           H/W queue.
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell)
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
		u32 ctl, u32 len, u64 ptr)
{
	struct hl_bd *bd;

	bd = q->kernel_address;
	bd += hl_pi_2_offset(q->pi);
	bd->ctl = cpu_to_le32(ctl);
	bd->len = cpu_to_le32(len);
	bd->ptr = cpu_to_le64(ptr);

	q->pi = hl_queue_inc_ptr(q->pi);
	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}

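/*
 * Illustrative note on the pi/ci arithmetic above: both pointers run over
 * twice the queue length so that a full queue can be told apart from an
 * empty one. As a rough example, if HL_QUEUE_LENGTH were 256 (hypothetical
 * value), pi and ci would wrap at 512. With pi == 300 and ci == 44,
 * delta == 256 and queue_free_slots() returns 0 (full); with pi == ci,
 * delta == 0 and it returns 256 (empty).
 */
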
/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 * @reserve_cq_entry: whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 *
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
				struct hl_hw_queue *q, int num_of_entries,
				bool reserve_cq_entry)
{
	atomic_t *free_slots =
			&hdev->completion_queue[q->cq_id].free_slots_cnt;
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	if (reserve_cq_entry) {
		/*
		 * Check we have enough space in the completion queue.
		 * Decrement the free slots counter by num_of_entries; if the
		 * result is negative, the CQ is full, so we can't submit a
		 * new CB because we won't get an ack on its completion.
		 * In that case, restore the counter and bail out.
		 */
		if (atomic_add_negative(num_of_entries * -1, free_slots)) {
			dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
				num_of_entries, q->hw_queue_id);
			atomic_add(num_of_entries, free_slots);
			return -EAGAIN;
		}
	}

	return 0;
}

/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 *
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
					struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	if (num_of_entries > q->int_queue_len) {
		dev_err(hdev->dev,
			"Cannot populate queue %u with %u jobs\n",
			q->hw_queue_id, num_of_entries);
		return -ENOMEM;
	}

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, q->int_queue_len);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}

/*
 * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
 * @hdev: Pointer to hl_device structure.
 * @q: Pointer to hl_hw_queue structure.
 * @num_of_entries: How many entries to check for space.
 *
 * Notice: We do not reserve queue entries so this function mustn't be called
 *         more than once per CS for the same queue
 *
 */
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}

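/*
 * Note: of the three helpers above, only ext_queue_sanity_checks() reserves
 * completion-queue entries; int_queue_sanity_checks() and
 * hw_queue_sanity_checks() only verify that the queue itself has room.
 */
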
/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, that must NOT generate a completion entry.
 * Sending CPU messages can be done instead via 'hl_hw_queue_submit_bd()'
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
				u32 cb_size, u64 cb_ptr)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
	int rc = 0;

	hdev->asic_funcs->hw_queues_lock(hdev);

	if (hdev->disabled) {
		rc = -EPERM;
		goto out;
	}

	/*
	 * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
	 * type only on init phase, when the queues are empty and being tested,
	 * so there is no need for sanity checks.
	 */
	if (q->queue_type != QUEUE_TYPE_HW) {
		rc = ext_queue_sanity_checks(hdev, q, 1, false);
		if (rc)
			goto out;
	}

	hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}

/*
 * ext_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_cq_entry cq_pkt;
	struct hl_cq *cq;
	u64 cq_addr;
	struct hl_cb *cb;
	u32 ctl;
	u32 len;
	u64 ptr;

	/*
	 * Update the JOB ID inside the BD CTL so the device would know what
	 * to write in the completion queue
	 */
	ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

	cb = job->patched_cb;
	len = job->job_cb_size;
	ptr = cb->bus_address;

	/* Skip completion flow in case this is a non completion CS */
	if (!cs_needs_completion(job->cs))
		goto submit_bd;

	cq_pkt.data = cpu_to_le32(
			((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
				& CQ_ENTRY_SHADOW_INDEX_MASK) |
			FIELD_PREP(CQ_ENTRY_SHADOW_INDEX_VALID_MASK, 1) |
			FIELD_PREP(CQ_ENTRY_READY_MASK, 1));

	/*
	 * No need to protect pi_offset because scheduling to the
	 * H/W queues is done under the scheduler mutex
	 *
	 * No need to check if CQ is full because it was already
	 * checked in ext_queue_sanity_checks
	 */
	cq = &hdev->completion_queue[q->cq_id];
	cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

	hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
						cq_addr,
						le32_to_cpu(cq_pkt.data),
						q->msi_vec,
						job->contains_dma_pkt);

	q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

	cq->pi = hl_cq_inc_ptr(cq->pi);

submit_bd:
	hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}

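/*
 * A short walk-through of the external-queue completion flow above: the PI
 * of the submitted BD is encoded both in the BD control word and in the CQ
 * entry payload, and the job pointer is kept in shadow_queue[] at the same
 * index. When the device writes that CQ entry back, the shadow index in the
 * payload lets the driver find the matching job again (the completion
 * handling itself lives outside this file).
 */
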
/*
 * int_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void int_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_bd bd;
	__le64 *pi;

	bd.ctl = 0;
	bd.len = cpu_to_le32(job->job_cb_size);

	if (job->is_kernel_allocated_cb)
		/* bus_address is actually an MMU-mapped address
		 * allocated from an internal pool
		 */
		bd.ptr = cpu_to_le64(job->user_cb->bus_address);
	else
		bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

	pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);

	q->pi++;
	q->pi &= ((q->int_queue_len << 1) - 1);

	hdev->asic_funcs->pqe_write(hdev, pi, &bd);

	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}

/*
 * hw_queue_schedule_job - submit a JOB to a H/W queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	u64 ptr;
	u32 offset, ctl, len;

	/*
	 * Upon PQE completion, COMP_DATA is used as the write data to the
	 * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
	 * write address offset in the SM block (QMAN LBW message).
	 * The write address offset is calculated as "COMP_OFFSET << 2".
	 */
	offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
	ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
		((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

	len = job->job_cb_size;

	/*
	 * A patched CB is created only if a user CB was allocated by driver and
	 * MMU is disabled. If MMU is enabled, the user CB should be used
	 * instead. If the user CB wasn't allocated by driver, assume that it
	 * holds an address.
	 */
	if (job->patched_cb)
		ptr = job->patched_cb->bus_address;
	else if (job->is_kernel_allocated_cb)
		ptr = job->user_cb->bus_address;
	else
		ptr = (u64) (uintptr_t) job->user_cb;

	hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}

static int init_signal_cs(struct hl_device *hdev,
		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
	struct hl_sync_stream_properties *prop;
	struct hl_hw_sob *hw_sob;
	u32 q_idx;
	int rc = 0;

	q_idx = job->hw_queue_id;
	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
	hw_sob = &prop->hw_sob[prop->curr_sob_offset];

	cs_cmpl->hw_sob = hw_sob;
	cs_cmpl->sob_val = prop->next_sob_val;

	dev_dbg(hdev->dev,
		"generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
		cs_cmpl->cs_seq);

	/* we set an EB since we must make sure all operations are done
	 * when sending the signal
	 */
	hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
				cs_cmpl->hw_sob->sob_id, 0, true);

	rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
								false);

	job->cs->sob_addr_offset = hw_sob->sob_addr;
	job->cs->initial_sob_count = prop->next_sob_val - 1;

	return rc;
}

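/*
 * Note on the signal bookkeeping in init_signal_cs() above: the completion
 * object captures next_sob_val before the SOB state is advanced, the
 * generated CB increments the SOB once on the device, and
 * hl_cs_signal_sob_wraparound_handler() (defined elsewhere) advances the
 * SOB bookkeeping and handles wrap-around of the SOB value.
 */
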
void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
			struct hl_cs *cs, struct hl_cs_job *job,
			struct hl_cs_compl *cs_cmpl)
{
	struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
	u32 offset = 0;

	cs_cmpl->hw_sob = handle->hw_sob;

	/* Note that encaps_sig_wait_offset was validated earlier in the flow
	 * against the max reserved signal count.
	 * Always decrement the offset by 1: if the user set offset 1, for
	 * example, he means to wait only for the first signal, which is
	 * pre_sob_val; if he set offset 2, the required value is
	 * (pre_sob_val + 1), and so on.
	 * If the user set the wait offset to 0, treat it as a legacy wait CS
	 * and wait for the next signal.
	 */
	if (job->encaps_sig_wait_offset)
		offset = job->encaps_sig_wait_offset - 1;

	cs_cmpl->sob_val = handle->pre_sob_val + offset;
}

static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
	struct hl_gen_wait_properties wait_prop;
	struct hl_sync_stream_properties *prop;
	struct hl_cs_compl *signal_cs_cmpl;
	u32 q_idx;

	q_idx = job->hw_queue_id;
	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	signal_cs_cmpl = container_of(cs->signal_fence,
					struct hl_cs_compl,
					base_fence);

	if (cs->encaps_signals) {
		/* use the encaps signal handle stored earlier in the flow
		 * and set the SOB information from the encaps
		 * signals handle
		 */
		hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);

		dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
				cs->encaps_sig_hdl->q_idx,
				cs->encaps_sig_hdl->cs_seq,
				cs_cmpl->sob_val,
				job->encaps_sig_wait_offset);
	} else {
		/* Copy the SOB id and value of the signal CS */
		cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
		cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
	}

	/* Check again if the signal CS has already completed. If it has,
	 * don't send any wait CS since the hw_sob could already be in reset.
	 * If the signal has not completed, take a refcount on the hw_sob to
	 * prevent the SOB from being reset while the wait CS is not yet
	 * submitted.
	 * Note that this check is protected by two locks: the hw_queue lock
	 * and the completion object lock, and the same completion object
	 * lock also protects the hw_sob reset handler function.
	 * The hw_queue lock prevents the hw_sob refcount value, which is
	 * changed by the signal/wait flows, from going out of sync.
	 */
	spin_lock(&signal_cs_cmpl->lock);

	if (completion_done(&cs->signal_fence->completion)) {
		spin_unlock(&signal_cs_cmpl->lock);
		return -EINVAL;
	}

	kref_get(&cs_cmpl->hw_sob->kref);

	spin_unlock(&signal_cs_cmpl->lock);

	dev_dbg(hdev->dev,
		"generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d, seq: %llu\n",
		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
		prop->base_mon_id, q_idx, cs->sequence);

	wait_prop.data = (void *) job->patched_cb;
	wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
	wait_prop.sob_mask = 0x1;
	wait_prop.sob_val = cs_cmpl->sob_val;
	wait_prop.mon_id = prop->base_mon_id;
	wait_prop.q_idx = q_idx;
	wait_prop.size = 0;

	hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);

	mb();
	hl_fence_put(cs->signal_fence);
	cs->signal_fence = NULL;

	return 0;
}

/*
 * init_signal_wait_cs - initialize a signal/wait CS
 * @cs: pointer to the signal/wait CS
 *
 * H/W queues spinlock should be taken before calling this function
 */
static int init_signal_wait_cs(struct hl_cs *cs)
{
	struct hl_ctx *ctx = cs->ctx;
	struct hl_device *hdev = ctx->hdev;
	struct hl_cs_job *job;
	struct hl_cs_compl *cs_cmpl =
			container_of(cs->fence, struct hl_cs_compl, base_fence);
	int rc = 0;

	/* There is only one job in a signal/wait CS */
	job = list_first_entry(&cs->job_list, struct hl_cs_job,
				cs_node);

	if (cs->type & CS_TYPE_SIGNAL)
		rc = init_signal_cs(hdev, job, cs_cmpl);
	else if (cs->type & CS_TYPE_WAIT)
		rc = init_wait_cs(hdev, cs, job, cs_cmpl);

	return rc;
}

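/*
 * Rough picture of the encapsulated-signals hookup below: the first staged
 * CS looks up the user's encaps handle in the context's signal manager and
 * records it (and its SOB) in the completion object, so that cs_do_release
 * can later drop the last reference to the handle. The exact handle lifetime
 * rules live with the encaps-signals code outside this file.
 */
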
static int encaps_sig_first_staged_cs_handler
			(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_compl *cs_cmpl =
			container_of(cs->fence,
					struct hl_cs_compl, base_fence);
	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
	struct hl_encaps_signals_mgr *mgr;
	int rc = 0;

	mgr = &cs->ctx->sig_mgr;

	spin_lock(&mgr->lock);
	encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
	if (encaps_sig_hdl) {
		/*
		 * Set handler CS sequence,
		 * the CS which contains the encapsulated signals.
		 */
		encaps_sig_hdl->cs_seq = cs->sequence;
		/* store the handle and set encaps signal indication,
		 * to be used later in cs_do_release to put the last
		 * reference to encaps signals handlers.
		 */
		cs_cmpl->encaps_signals = true;
		cs_cmpl->encaps_sig_hdl = encaps_sig_hdl;

		/* set hw_sob pointer in completion object
		 * since it's used in cs_do_release flow to put
		 * refcount to sob
		 */
		cs_cmpl->hw_sob = encaps_sig_hdl->hw_sob;
		cs_cmpl->sob_val = encaps_sig_hdl->pre_sob_val +
						encaps_sig_hdl->count;

		dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
				cs->sequence, encaps_sig_hdl->id,
				encaps_sig_hdl->count,
				encaps_sig_hdl->q_idx,
				cs_cmpl->hw_sob->sob_id,
				cs_cmpl->sob_val);

	} else {
		dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
				cs->encaps_sig_hdl_id);
		rc = -EINVAL;
	}

	spin_unlock(&mgr->lock);

	return rc;
}

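/*
 * hl_hw_queue_schedule_cs() below is the main submission path: it validates
 * room on every queue the CS touches (reserving CQ entries for external
 * queues), runs the signal/wait/collective/encaps preparation if needed,
 * links the CS into the mirror list (arming the TDR for the first entry),
 * and only then writes the jobs to their queues. On failure, the reserved
 * CQ entries are returned in the unroll_cq_resv path.
 */
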
/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 * @cs: pointer to the CS
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
	enum hl_device_status status;
	struct hl_cs_counters_atomic *cntr;
	struct hl_ctx *ctx = cs->ctx;
	struct hl_device *hdev = ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_hw_queue *q;
	int rc = 0, i, cq_cnt;
	bool first_entry;
	u32 max_queues;

	cntr = &hdev->aggregated_cs_counters;

	hdev->asic_funcs->hw_queues_lock(hdev);

	if (!hl_device_operational(hdev, &status)) {
		atomic64_inc(&cntr->device_in_reset_drop_cnt);
		atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt);
		dev_err(hdev->dev,
			"device is %s, CS rejected!\n", hdev->status[status]);
		rc = -EPERM;
		goto out;
	}

	max_queues = hdev->asic_prop.max_queues;

	q = &hdev->kernel_queues[0];
	for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
		if (cs->jobs_in_queue_cnt[i]) {
			switch (q->queue_type) {
			case QUEUE_TYPE_EXT:
				rc = ext_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i],
						cs_needs_completion(cs) ?
							true : false);
				break;
			case QUEUE_TYPE_INT:
				rc = int_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i]);
				break;
			case QUEUE_TYPE_HW:
				rc = hw_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i]);
				break;
			default:
				dev_err(hdev->dev, "Queue type %d is invalid\n",
					q->queue_type);
				rc = -EINVAL;
				break;
			}

			if (rc) {
				atomic64_inc(
					&ctx->cs_counters.queue_full_drop_cnt);
				atomic64_inc(&cntr->queue_full_drop_cnt);
				goto unroll_cq_resv;
			}

			if (q->queue_type == QUEUE_TYPE_EXT)
				cq_cnt++;
		}
	}

	if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
		rc = init_signal_wait_cs(cs);
		if (rc)
			goto unroll_cq_resv;
	} else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) {
		rc = hdev->asic_funcs->collective_wait_init_cs(cs);
		if (rc)
			goto unroll_cq_resv;
	}

	if (cs->encaps_signals && cs->staged_first) {
		rc = encaps_sig_first_staged_cs_handler(hdev, cs);
		if (rc)
			goto unroll_cq_resv;
	}

	spin_lock(&hdev->cs_mirror_lock);

	/* Verify staged CS exists and add to the staged list */
	if (cs->staged_cs && !cs->staged_first) {
		struct hl_cs *staged_cs;

		staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
		if (!staged_cs) {
			dev_err(hdev->dev,
				"Cannot find staged submission sequence %llu",
				cs->staged_sequence);
			rc = -EINVAL;
			goto unlock_cs_mirror;
		}

		if (is_staged_cs_last_exists(hdev, staged_cs)) {
			dev_err(hdev->dev,
				"Staged submission sequence %llu already submitted",
				cs->staged_sequence);
			rc = -EINVAL;
			goto unlock_cs_mirror;
		}

		list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);

		/* update stream map of the first CS */
		if (hdev->supports_wait_for_multi_cs)
			staged_cs->fence->stream_master_qid_map |=
					cs->fence->stream_master_qid_map;
	}

	list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);

	/* Queue TDR if the CS is the first entry and if timeout is wanted */
	first_entry = list_first_entry(&hdev->cs_mirror_list,
					struct hl_cs, mirror_node) == cs;
	if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
				first_entry && cs_needs_timeout(cs)) {
		cs->tdr_active = true;
		schedule_delayed_work(&cs->work_tdr, cs->timeout_jiffies);

	}

	spin_unlock(&hdev->cs_mirror_lock);

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		switch (job->queue_type) {
		case QUEUE_TYPE_EXT:
			ext_queue_schedule_job(job);
			break;
		case QUEUE_TYPE_INT:
			int_queue_schedule_job(job);
			break;
		case QUEUE_TYPE_HW:
			hw_queue_schedule_job(job);
			break;
		default:
			break;
		}

	cs->submitted = true;

	goto out;

unlock_cs_mirror:
	spin_unlock(&hdev->cs_mirror_lock);
unroll_cq_resv:
	q = &hdev->kernel_queues[0];
	for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
		if ((q->queue_type == QUEUE_TYPE_EXT) &&
				(cs->jobs_in_queue_cnt[i])) {
			atomic_t *free_slots =
				&hdev->completion_queue[i].free_slots_cnt;
			atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
			cq_cnt--;
		}
	}

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}

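/*
 * Note on the CQ accounting in hl_hw_queue_schedule_cs() above: every
 * external queue that passed ext_queue_sanity_checks() may have reserved
 * completion-queue entries, and cq_cnt counts how many external queues were
 * handled. If a later step fails, the unroll_cq_resv label walks the queues
 * in the same order and returns the per-queue job counts to free_slots_cnt.
 * The counter is expected to be replenished again elsewhere in the driver
 * once the completions themselves are processed.
 */
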
/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: which queue to increment its ci
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

	atomic_inc(&q->ci);
}

static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
					bool is_cpu_queue)
{
	void *p;
	int rc;

	if (is_cpu_queue)
		p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_QUEUE_SIZE_IN_BYTES,
							&q->bus_address);
	else
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
							HL_QUEUE_SIZE_IN_BYTES,
							&q->bus_address,
							GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = p;

	q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
					sizeof(*q->shadow_queue),
					GFP_KERNEL);
	if (!q->shadow_queue) {
		dev_err(hdev->dev,
			"Failed to allocate shadow queue for H/W queue %d\n",
			q->hw_queue_id);
		rc = -ENOMEM;
		goto free_queue;
	}

	/* Make sure read/write pointers are initialized to start of queue */
	atomic_set(&q->ci, 0);
	q->pi = 0;

	return 0;

free_queue:
	if (is_cpu_queue)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					q->kernel_address,
					q->bus_address);

	return rc;
}

static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
					&q->bus_address, &q->int_queue_len);
	if (!p) {
		dev_err(hdev->dev,
			"Failed to get base address for internal queue %d\n",
			q->hw_queue_id);
		return -EFAULT;
	}

	q->kernel_address = p;
	q->pi = 0;
	atomic_set(&q->ci, 0);

	return 0;
}

static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, true);
}

static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, false);
}

static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_QUEUE_SIZE_IN_BYTES,
						&q->bus_address,
						GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = p;

	/* Make sure read/write pointers are initialized to start of queue */
	atomic_set(&q->ci, 0);
	q->pi = 0;

	return 0;
}

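/*
 * To recap the init helpers above: external and CPU queues get a
 * host-accessible ring plus a shadow queue (used to map completions back to
 * jobs), internal queues point at a base address supplied by the ASIC code,
 * and H/W queues get a coherent ring like external queues but no shadow
 * queue, since their completions are not tracked here.
 */
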
static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
{
	struct hl_sync_stream_properties *sync_stream_prop;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_hw_sob *hw_sob;
	int sob, reserved_mon_idx, queue_idx;

	sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	/* We use 'collective_mon_idx' as a running index in order to reserve
	 * monitors for collective master/slave queues.
	 * collective master queue gets 2 reserved monitors
	 * collective slave queue gets 1 reserved monitor
	 */
	if (hdev->kernel_queues[q_idx].collective_mode ==
			HL_COLLECTIVE_MASTER) {
		reserved_mon_idx = hdev->collective_mon_idx;

		/* reserve the first monitor for collective master queue */
		sync_stream_prop->collective_mstr_mon_id[0] =
			prop->collective_first_mon + reserved_mon_idx;

		/* reserve the second monitor for collective master queue */
		sync_stream_prop->collective_mstr_mon_id[1] =
			prop->collective_first_mon + reserved_mon_idx + 1;

		hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS;
	} else if (hdev->kernel_queues[q_idx].collective_mode ==
			HL_COLLECTIVE_SLAVE) {
		reserved_mon_idx = hdev->collective_mon_idx++;

		/* reserve a monitor for collective slave queue */
		sync_stream_prop->collective_slave_mon_id =
			prop->collective_first_mon + reserved_mon_idx;
	}

	if (!hdev->kernel_queues[q_idx].supports_sync_stream)
		return;

	queue_idx = hdev->sync_stream_queue_idx++;

	sync_stream_prop->base_sob_id = prop->sync_stream_first_sob +
			(queue_idx * HL_RSVD_SOBS);
	sync_stream_prop->base_mon_id = prop->sync_stream_first_mon +
			(queue_idx * HL_RSVD_MONS);
	sync_stream_prop->next_sob_val = 1;
	sync_stream_prop->curr_sob_offset = 0;

	for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
		hw_sob = &sync_stream_prop->hw_sob[sob];
		hw_sob->hdev = hdev;
		hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
		hw_sob->sob_addr =
			hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
		hw_sob->q_idx = q_idx;
		kref_init(&hw_sob->kref);
	}
}

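/*
 * Illustrative example of the reservation arithmetic above (the numbers are
 * hypothetical): if HL_RSVD_SOBS were 2 and HL_RSVD_MONS were 2, the third
 * sync-stream queue to be initialized (queue_idx == 2) would get
 * base_sob_id == sync_stream_first_sob + 4 and
 * base_mon_id == sync_stream_first_mon + 4, i.e. each queue owns a disjoint
 * block of SOBs and monitors.
 */
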
static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
{
	struct hl_sync_stream_properties *prop =
			&hdev->kernel_queues[q_idx].sync_stream_prop;

	/*
	 * In case we got here due to a stuck CS, the refcnt might be bigger
	 * than 1 and therefore we reset it.
	 */
	kref_init(&prop->hw_sob[prop->curr_sob_offset].kref);
	prop->curr_sob_offset = 0;
	prop->next_sob_val = 1;
}

/*
 * queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
			u32 hw_queue_id)
{
	int rc;

	q->hw_queue_id = hw_queue_id;

	switch (q->queue_type) {
	case QUEUE_TYPE_EXT:
		rc = ext_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_INT:
		rc = int_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_CPU:
		rc = cpu_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_HW:
		rc = hw_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_NA:
		q->valid = 0;
		return 0;
	default:
		dev_crit(hdev->dev, "wrong queue type %d during init\n",
			q->queue_type);
		rc = -EINVAL;
		break;
	}

	sync_stream_queue_init(hdev, q->hw_queue_id);

	if (rc)
		return rc;

	q->valid = 1;

	return 0;
}

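/*
 * A queue of type QUEUE_TYPE_NA is simply marked invalid above; queue_fini()
 * and hl_hw_queue_reset() below skip such queues, so only queues that
 * completed queue_init() successfully are ever torn down or reset.
 */
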
/*
 * queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
	if (!q->valid)
		return;

	/*
	 * If we arrived here, there are no jobs waiting on this queue
	 * so we can safely remove it.
	 * This is because this function can only be called when:
	 * 1. Either a context is deleted, which only can occur if all its
	 *    jobs were finished
	 * 2. A context wasn't able to be created due to failure or timeout,
	 *    which means there are no jobs on the queue yet
	 *
	 * The only exception are the queues of the kernel context, but
	 * if they are being destroyed, it means that the entire module is
	 * being removed. If the module is removed, it means there is no open
	 * user context. It also means that if a job was submitted by
	 * the kernel driver (e.g. context creation), the job itself was
	 * released by the kernel driver when a timeout occurred on its
	 * Completion. Thus, we don't need to release it again.
	 */

	if (q->queue_type == QUEUE_TYPE_INT)
		return;

	kfree(q->shadow_queue);

	if (q->queue_type == QUEUE_TYPE_CPU)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					q->kernel_address,
					q->bus_address);
}

int hl_hw_queues_create(struct hl_device *hdev)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hl_hw_queue *q;
	int i, rc, q_ready_cnt;

	hdev->kernel_queues = kcalloc(asic->max_queues,
				sizeof(*hdev->kernel_queues), GFP_KERNEL);

	if (!hdev->kernel_queues) {
		dev_err(hdev->dev, "Not enough memory for H/W queues\n");
		return -ENOMEM;
	}

	/* Initialize the H/W queues */
	for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
			i < asic->max_queues ; i++, q_ready_cnt++, q++) {

		q->queue_type = asic->hw_queues_props[i].type;
		q->supports_sync_stream =
				asic->hw_queues_props[i].supports_sync_stream;
		q->collective_mode = asic->hw_queues_props[i].collective_mode;
		rc = queue_init(hdev, q, i);
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize queue %d\n", i);
			goto release_queues;
		}
	}

	return 0;

release_queues:
	for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);

	return rc;
}

void hl_hw_queues_destroy(struct hl_device *hdev)
{
	struct hl_hw_queue *q;
	u32 max_queues = hdev->asic_prop.max_queues;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);
}

void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
	struct hl_hw_queue *q;
	u32 max_queues = hdev->asic_prop.max_queues;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
		if ((!q->valid) ||
			((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
			continue;
		q->pi = 0;
		atomic_set(&q->ci, 0);

		if (q->supports_sync_stream)
			sync_stream_queue_reset(hdev, q->hw_queue_id);
	}
}