iowait.h

/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 */

#ifndef _HFI1_IOWAIT_H
#define _HFI1_IOWAIT_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "sdma_txreq.h"

/*
 * typedef restart_t - restart callback
 * @work: pointer to work structure
 */
typedef void (*restart_t)(struct work_struct *work);

#define IOWAIT_PENDING_IB 0x0
#define IOWAIT_PENDING_TID 0x1

/*
 * A QP can have multiple Send Engines (SEs).
 *
 * The current use case is for supporting a TID RDMA
 * packet build/xmit mechanism independent from verbs.
 */
#define IOWAIT_SES 2
#define IOWAIT_IB_SE 0
#define IOWAIT_TID_SE 1

struct sdma_txreq;
struct sdma_engine;

/**
 * struct iowait_work - per send engine (SE) work state
 * @iowork: the work struct used to schedule this SE
 * @tx_head: overflow list of prebuilt sdma_txreqs
 * @iow: the parent iowait structure
 *
 * This structure holds the work item (process) specific
 * details associated with each of the two SEs of the QP.
 *
 * The workstruct and the queued TXs are unique to each SE.
 */
struct iowait;
struct iowait_work {
        struct work_struct iowork;
        struct list_head tx_head;
        struct iowait *iow;
};

/**
 * struct iowait - wait/queuing state embedded in the user's structure
 * @list: used to add/insert into QP/PQ wait lists
 * @sleep: no space callback
 * @wakeup: space callback wakeup
 * @sdma_drained: sdma count drained
 * @init_priority: callback to manipulate priority
 * @lock: pointer to the seqlock_t that protects the wait queue head
 * @wait_dma: wait for sdma_busy == 0
 * @wait_pio: wait for pio_busy == 0
 * @sdma_busy: # of sdma packets in flight
 * @pio_busy: # of pio packets in flight
 * @count: total number of descriptors on the tx_head'ed lists
 * @tx_limit: limit for overflow queuing
 * @tx_count: number of tx entries on the tx_head'ed lists
 * @starved_cnt: number of consecutive times this entry was queued
 *               without having sent a packet
 * @priority: number of priority (VIP) packets at the head of the SE lists
 * @flags: wait flags (one per QP)
 * @wait: SE array for multiple legs
 *
 * This is to be embedded in the user's state structure
 * (QP or PQ).
 *
 * The sleep and wakeup members are a bit misnamed.
 * They do not, strictly speaking, sleep or wake up;
 * they are callbacks for the ULP to implement whatever
 * queuing/dequeuing of the embedded iowait and its
 * containing struct is needed when a resource shortage
 * such as SDMA ring space or PIO credit space is seen.
 *
 * Both are potentially called with locks held, so
 * sleeping is not allowed, and submitting txreqs
 * directly from the wakeup call is not supported
 * because of lock conflicts.
 *
 * The wait_dma member, together with the sdma_busy count,
 * lets the driver wait for all outstanding SDMA work to
 * complete (see iowait_sdma_drain()); wait_pio and pio_busy
 * serve the same purpose for PIO sends.
 *
 * The lock field is used by waiters to record
 * the seqlock_t that guards the list head.
 * Waiters explicitly know that, but the destroy
 * code that unwaits QPs does not.
 */
struct iowait {
        struct list_head list;
        int (*sleep)(
                struct sdma_engine *sde,
                struct iowait_work *wait,
                struct sdma_txreq *tx,
                uint seq,
                bool pkts_sent
                );
        void (*wakeup)(struct iowait *wait, int reason);
        void (*sdma_drained)(struct iowait *wait);
        void (*init_priority)(struct iowait *wait);
        seqlock_t *lock;
        wait_queue_head_t wait_dma;
        wait_queue_head_t wait_pio;
        atomic_t sdma_busy;
        atomic_t pio_busy;
        u32 count;
        u32 tx_limit;
        u32 tx_count;
        u8 starved_cnt;
        u8 priority;
        unsigned long flags;
        struct iowait_work wait[IOWAIT_SES];
};
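
/*
 * Illustrative sketch (not part of this header's API): a ULP embeds a
 * struct iowait in its own per-QP/PQ state and wires up the callbacks
 * with iowait_init(), declared below.  The names my_qp_priv, MY_TX_LIMIT,
 * my_ib_work, my_tid_work, my_sleep, my_wakeup and my_sdma_drained are
 * hypothetical.
 *
 *      struct my_qp_priv {
 *              struct iowait s_iowait;
 *      };
 *
 *      iowait_init(&priv->s_iowait, MY_TX_LIMIT, my_ib_work, my_tid_work,
 *                  my_sleep, my_wakeup, my_sdma_drained, NULL);
 *
 * Per the contract spelled out above, both my_sleep() and my_wakeup()
 * may be invoked with locks held, so they must not sleep, and my_wakeup()
 * must not submit txreqs directly; it should instead schedule one of the
 * SE work items (see iowait_schedule() and iowait_tid_schedule() below).
 */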

#define SDMA_AVAIL_REASON 0

void iowait_set_flag(struct iowait *wait, u32 flag);
bool iowait_flag_set(struct iowait *wait, u32 flag);
void iowait_clear_flag(struct iowait *wait, u32 flag);

void iowait_init(struct iowait *wait, u32 tx_limit,
                 void (*func)(struct work_struct *work),
                 void (*tidfunc)(struct work_struct *work),
                 int (*sleep)(struct sdma_engine *sde,
                              struct iowait_work *wait,
                              struct sdma_txreq *tx,
                              uint seq,
                              bool pkts_sent),
                 void (*wakeup)(struct iowait *wait, int reason),
                 void (*sdma_drained)(struct iowait *wait),
                 void (*init_priority)(struct iowait *wait));

/**
 * iowait_schedule() - schedule the default send engine work
 * @wait: wait struct to schedule
 * @wq: workqueue for schedule
 * @cpu: the cpu to queue the work on
 */
static inline bool iowait_schedule(struct iowait *wait,
                                   struct workqueue_struct *wq, int cpu)
{
        return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
}

/**
 * iowait_tid_schedule - schedule the tid SE
 * @wait: the iowait structure
 * @wq: the work queue
 * @cpu: the cpu to queue the work on
 */
static inline bool iowait_tid_schedule(struct iowait *wait,
                                       struct workqueue_struct *wq, int cpu)
{
        return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
}

/**
 * iowait_sdma_drain() - wait for DMAs to drain
 * @wait: iowait structure
 *
 * This will delay until the iowait sdmas have
 * completed.
 */
static inline void iowait_sdma_drain(struct iowait *wait)
{
        wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
}

/**
 * iowait_sdma_pending() - return sdma pending count
 * @wait: iowait structure
 */
static inline int iowait_sdma_pending(struct iowait *wait)
{
        return atomic_read(&wait->sdma_busy);
}

/**
 * iowait_sdma_inc - note sdma io pending
 * @wait: iowait structure
 */
static inline void iowait_sdma_inc(struct iowait *wait)
{
        atomic_inc(&wait->sdma_busy);
}

/**
 * iowait_sdma_add - add count to pending
 * @wait: iowait structure
 * @count: number of pending sdma packets to add
 */
static inline void iowait_sdma_add(struct iowait *wait, int count)
{
        atomic_add(count, &wait->sdma_busy);
}

/**
 * iowait_sdma_dec - note sdma complete
 * @wait: iowait structure
 *
 * Returns true if the pending sdma count dropped to zero.
 */
static inline int iowait_sdma_dec(struct iowait *wait)
{
        if (!wait)
                return 0;
        return atomic_dec_and_test(&wait->sdma_busy);
}
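
/*
 * Illustrative sketch (an assumption about the typical pairing, not a
 * quote of the driver's code paths): sdma_busy is incremented when a
 * txreq is handed to the SDMA engine and decremented when its completion
 * runs, so that iowait_sdma_drain() can block until everything finishes:
 *
 *      submit path:   iowait_sdma_inc(wait);
 *      completion:    if (iowait_sdma_dec(wait))
 *                             iowait_drain_wakeup(wait);   // defined below
 *      teardown:      iowait_sdma_drain(wait);
 *
 * The pio_busy counter below follows the same pattern for PIO sends,
 * except that iowait_pio_drain() gives up after a timeout.
 */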

/**
 * iowait_pio_drain() - wait for pios to drain
 * @wait: iowait structure
 *
 * This will delay (for up to a second) until the
 * iowait pios have completed.
 */
static inline void iowait_pio_drain(struct iowait *wait)
{
        wait_event_timeout(wait->wait_pio,
                           !atomic_read(&wait->pio_busy),
                           HZ);
}

/**
 * iowait_pio_pending() - return pio pending count
 * @wait: iowait structure
 */
static inline int iowait_pio_pending(struct iowait *wait)
{
        return atomic_read(&wait->pio_busy);
}

/**
 * iowait_pio_inc - note pio pending
 * @wait: iowait structure
 */
static inline void iowait_pio_inc(struct iowait *wait)
{
        atomic_inc(&wait->pio_busy);
}

/**
 * iowait_pio_dec - note pio complete
 * @wait: iowait structure
 *
 * Returns true if the pending pio count dropped to zero.
 */
static inline int iowait_pio_dec(struct iowait *wait)
{
        if (!wait)
                return 0;
        return atomic_dec_and_test(&wait->pio_busy);
}

/**
 * iowait_drain_wakeup() - trigger the drain waiters
 * @wait: iowait structure
 *
 * This will wake any iowait_sdma_drain() or iowait_pio_drain()
 * waiters and invoke the sdma_drained callback, if one is set.
 */
static inline void iowait_drain_wakeup(struct iowait *wait)
{
        wake_up(&wait->wait_dma);
        wake_up(&wait->wait_pio);
        if (wait->sdma_drained)
                wait->sdma_drained(wait);
}

/**
 * iowait_get_txhead() - get packet off of iowait list
 * @wait: iowait_work structure
 */
static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
        struct sdma_txreq *tx = NULL;

        if (!list_empty(&wait->tx_head)) {
                tx = list_first_entry(
                        &wait->tx_head,
                        struct sdma_txreq,
                        list);
                list_del_init(&tx->list);
        }
        return tx;
}

/*
 * Return the descriptor count of the first queued txreq on the SE and,
 * if it is a priority (VIP) packet, bump the parent iowait's priority.
 */
static inline u16 iowait_get_desc(struct iowait_work *w)
{
        u16 num_desc = 0;
        struct sdma_txreq *tx = NULL;

        if (!list_empty(&w->tx_head)) {
                tx = list_first_entry(&w->tx_head, struct sdma_txreq,
                                      list);
                num_desc = tx->num_desc;
                if (tx->flags & SDMA_TXREQ_F_VIP)
                        w->iow->priority++;
        }
        return num_desc;
}

static inline u32 iowait_get_all_desc(struct iowait *w)
{
        u32 num_desc = 0;

        num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]);
        num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]);
        return num_desc;
}

static inline void iowait_update_priority(struct iowait_work *w)
{
        struct sdma_txreq *tx = NULL;

        if (!list_empty(&w->tx_head)) {
                tx = list_first_entry(&w->tx_head, struct sdma_txreq,
                                      list);
                if (tx->flags & SDMA_TXREQ_F_VIP)
                        w->iow->priority++;
        }
}

static inline void iowait_update_all_priority(struct iowait *w)
{
        iowait_update_priority(&w->wait[IOWAIT_IB_SE]);
        iowait_update_priority(&w->wait[IOWAIT_TID_SE]);
}

static inline void iowait_init_priority(struct iowait *w)
{
        w->priority = 0;
        if (w->init_priority)
                w->init_priority(w);
}

/* Recompute the iowait's priority from the queued txreqs on both SEs. */
static inline void iowait_get_priority(struct iowait *w)
{
        iowait_init_priority(w);
        iowait_update_all_priority(w);
}
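
/*
 * Illustrative sketch (hypothetical progress loop, not driver code): once
 * descriptors become available again, a send engine worker can replay the
 * txreqs that were parked on an SE's tx_head overflow list.  my_resubmit()
 * below is a made-up helper:
 *
 *      struct sdma_txreq *tx;
 *
 *      while ((tx = iowait_get_txhead(w)) != NULL) {
 *              if (my_resubmit(sde, tx)) {
 *                      // still no room; put it back and try again later
 *                      list_add(&tx->list, &w->tx_head);
 *                      break;
 *              }
 *      }
 *
 * iowait_get_priority() above is typically called just before queuing the
 * iowait on a wait list, so that iowait_queue() (below) can move entries
 * with VIP packets to the head of the list.
 */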

/**
 * iowait_queue - Put the iowait on a wait queue
 * @pkts_sent: have some packets been sent before queuing?
 * @w: the iowait struct
 * @wait_head: the wait queue
 *
 * This function is called to insert an iowait struct into a
 * wait queue after a resource (e.g., an sdma descriptor or a
 * pio buffer) runs out.
 */
static inline void iowait_queue(bool pkts_sent, struct iowait *w,
                                struct list_head *wait_head)
{
        /*
         * To play fair, insert the iowait at the tail of the wait queue if it
         * has already sent some packets; otherwise, put it at the head.
         * However, if it has priority packets to send, also put it at the
         * head.
         */
        if (pkts_sent)
                w->starved_cnt = 0;
        else
                w->starved_cnt++;

        if (w->priority > 0 || !pkts_sent)
                list_add(&w->list, wait_head);
        else
                list_add_tail(&w->list, wait_head);
}

/**
 * iowait_starve_clear - clear the wait queue's starve count
 * @pkts_sent: have some packets been sent?
 * @w: the iowait struct
 *
 * This function is called to clear the starve count. If no
 * packets have been sent, the starve count will not be cleared.
 */
static inline void iowait_starve_clear(bool pkts_sent, struct iowait *w)
{
        if (pkts_sent)
                w->starved_cnt = 0;
}

/* Update the top priority index */
uint iowait_priority_update_top(struct iowait *w,
                                struct iowait *top,
                                uint idx, uint top_idx);

/**
 * iowait_packet_queued() - determine if a packet is queued
 * @wait: the iowait_work structure
 */
static inline bool iowait_packet_queued(struct iowait_work *wait)
{
        return !list_empty(&wait->tx_head);
}

/**
 * iowait_inc_wait_count - increment wait counts
 * @w: the iowait work struct
 * @n: the count
 */
static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
{
        if (!w)
                return;
        w->iow->tx_count++;
        w->iow->count += n;
}

/**
 * iowait_get_tid_work - return iowait_work for tid SE
 * @w: the iowait struct
 */
static inline struct iowait_work *iowait_get_tid_work(struct iowait *w)
{
        return &w->wait[IOWAIT_TID_SE];
}

/**
 * iowait_get_ib_work - return iowait_work for ib SE
 * @w: the iowait struct
 */
static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
{
        return &w->wait[IOWAIT_IB_SE];
}

/**
 * iowait_ioww_to_iow - return iowait given iowait_work
 * @w: the iowait_work struct
 */
static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
{
        if (likely(w))
                return w->iow;
        return NULL;
}

void iowait_cancel_work(struct iowait *w);
int iowait_set_work_flag(struct iowait_work *w);

#endif
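
/*
 * Illustrative sketch (hypothetical ULP sleep callback; my_waitlock and
 * my_waitlist are made-up names): when the SDMA engine reports a
 * descriptor shortage, a sleep callback typically parks the txreq on the
 * SE's tx_head, queues the iowait on a wait list guarded by a seqlock,
 * and records that seqlock in the iowait's lock field:
 *
 *      static int my_sleep(struct sdma_engine *sde, struct iowait_work *wait,
 *                          struct sdma_txreq *tx, uint seq, bool pkts_sent)
 *      {
 *              struct iowait *w = iowait_ioww_to_iow(wait);
 *
 *              write_seqlock(&my_waitlock);
 *              list_add_tail(&tx->list, &wait->tx_head);
 *              iowait_inc_wait_count(wait, tx->num_desc);
 *              if (list_empty(&w->list)) {
 *                      iowait_get_priority(w);
 *                      w->lock = &my_waitlock;
 *                      iowait_queue(pkts_sent, w, &my_waitlist);
 *              }
 *              write_sequnlock(&my_waitlock);
 *              return -EBUSY;
 *      }
 *
 * Returning a nonzero error such as -EBUSY signals that the txreq was
 * deferred rather than submitted; the wakeup callback later reschedules
 * the SE work via iowait_schedule()/iowait_tid_schedule().
 */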