cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tx.h (5525B)


/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2020-2022 Intel Corporation
 */
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

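/* Scratch page for building TSO headers; @pos is the next free byte in @page. */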
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

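/* DMA address of the scratch "first TB" buffer for queue slot @idx. */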
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

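/*
 * Fold a ring index into the command window. Assumes @q->n_window is a
 * power of two, so masking with (n_window - 1) is a cheap modulo.
 */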
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

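/*
 * Wake a stopped queue: clear its stopped bit and let the op mode know
 * it may hand us frames for this queue again.
 */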
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

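/*
 * Return a pointer to the TFD at @idx; on TFH (gen2) hardware the index
 * is first folded into the command window.
 */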
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

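/*
 * Stop a queue: set its stopped bit and tell the op mode the queue is
 * full, unless it was already stopped.
 */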
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (used to look up the queue size)
 * @index: current index
 *
 * The queue size is a power of two, so masking with (size - 1) wraps
 * the incremented index back to zero.
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (used to look up the queue size)
 * @index: current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

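/*
 * Whether slot @i is currently in use. When the write pointer is ahead
 * of the read pointer the used region is [r, w); when the ring has
 * wrapped, everything except [w, r) is in use.
 */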
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid,
		      int size, unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
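/*
 * Number of transfer buffers in a TFD. Both the legacy and the TFH
 * (gen2) layouts keep the count in the low 5 bits.
 */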
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      void *_tfd)
{
	struct iwl_tfd *tfd;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;

		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
	}

	tfd = (struct iwl_tfd *)_tfd;
	return tfd->num_tbs & 0x1f;
}

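/*
 * Length of transfer buffer @idx. The legacy TFD packs the high address
 * bits into the low 4 bits of hi_n_len and the length into the upper 12,
 * hence the shift; TFH TFDs carry the length in a field of its own.
 */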
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */