cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ptp.c (19899B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"

struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;
	struct mlx5_flow_handle *udp_v4_rule;
	struct mlx5_flow_handle *udp_v6_rule;
	bool valid;
};

struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};

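/* Two hardware timestamps are stashed in skb->cb for each PTP-stamped
 * packet: one taken from the regular send completion (CQE) and one from
 * the port. The skb is reported to the stack only once both have arrived.
 */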
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};

void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}

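/* Report the port timestamp to the stack. Samples where the port and CQE
 * timestamps diverge by more than the bound below are considered bogus and
 * are dropped and accounted in the abort counters.
 */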
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		cq_stats->abort++;
		cq_stats->abort_abs_diff_ns += diff;
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}

void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* If both CQEs have arrived, check and report the port timestamp,
	 * and clear skb->cb, as the skb is about to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

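/* Match a port timestamp CQE with the oldest in-flight skb: skbs are pushed
 * into the FIFO at transmit time and popped here in the same order.
 */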
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

out:
	napi_consume_skb(skb, budget);
}

static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done == budget;
}

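/* A single NAPI instance serves the whole PTP channel: the per-TC SQ CQs,
 * their timestamp CQs, and the PTP RQ. Reporting the full budget while any
 * of them is busy keeps the poll scheduled.
 */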
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}

static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev      = c->pdev;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->priv      = c->priv;
	sq->mdev      = mdev;
	sq->ch_ix     = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq     = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db    = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

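/* The skb FIFO holds transmitted skbs until their port timestamp CQEs
 * arrive. It mirrors the SQ work queue, whose size is a power of two, so
 * wq_sz - 1 serves as the ring mask.
 */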
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);

	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
					     GFP_KERNEL, numa);
	if (!ptpsq->skb_fifo.fifo)
		return -ENOMEM;

	ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
	ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
	ptpsq->skb_fifo.mask = wq_sz - 1;

	return 0;
}

static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
{
	while (*skb_fifo->pc != *skb_fifo->cc) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);

		dev_kfree_skb_any(skb);
	}
}

static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}

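/* ts_cqe_to_dest_cqn instructs the device to deliver the port timestamp
 * CQE to the dedicated ts_cq rather than to the regular SQ CQ.
 */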
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = txqsq->cq.mcq.cqn;
	csp.wq_ctrl         = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq,
					 dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}

static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

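/* PTP TX queue indices start right after the num_tc * num_channels regular
 * TX queues, one PTP SQ per traffic class.
 */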
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}

static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}

static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}

static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
				struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	struct mlx5e_cq *cq = &c->rq.cq;

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->rq_param.cqp;

	return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
}

static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}

static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

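/* The PTP RQ is a plain cyclic RQ sized for the netdev's maximal MTU, so
 * PTP packets can be received regardless of the MTU configured on the
 * regular channels.
 */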
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     u16 q_counter,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}

static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size = orig->log_sq_size;
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}
	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}

static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = priv->netdev;
	rq->priv         = priv;
	rq->clock        = &mdev->clock;
	rq->tstamp       = &priv->tstamp;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats        = &c->priv->ptp_stats.rq;
	rq->ix           = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}

static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_param)
{
	int node = dev_to_node(c->mdev->device);
	int err;

	err = mlx5e_init_ptp_rq(c, params, &c->rq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
}

static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}

static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}

static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
{
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
		__set_bit(MLX5E_PTP_STATE_TX, c->state);

	if (params->ptp_rx)
		__set_bit(MLX5E_PTP_STATE_RX, c->state);

	return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}

static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(priv);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(priv);
	ptp_fs->valid = false;
}

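/* Redirect PTP event traffic to the PTP TIR: UDP packets to destination
 * port PTP_EV_PORT (319) over IPv4 and IPv6, and L2 packets with ethertype
 * ETH_P_1588 (0x88F7).
 */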
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
	struct mlx5_flow_handle *rule;
	int err;

	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(priv);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(priv);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(priv);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(priv);
out_free:
	return err;
}

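/* Open the auxiliary PTP channel. It is built much like a regular channel,
 * but is a singleton (MLX5E_PTP_CHANNEL_IX) and instantiates only the TX
 * and/or RX side, as selected by mlx5e_ptp_set_state().
 */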
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		/* kvfree(NULL) is a no-op, so err_free handles a partial
		 * allocation failure without leaking the other buffer.
		 */
		err = -ENOMEM;
		goto err_free;
	}

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc   = mlx5e_get_dcb_num_tc(params);
	c->stats    = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}

void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}

void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
		mlx5e_trigger_napi_sched(&c->napi);
	}
}

void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}

int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
		return -EINVAL;

	*rqn = c->rq.rqn;
	return 0;
}

int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
{
	struct mlx5e_ptp_fs *ptp_fs;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
	if (!ptp_fs)
		return -ENOMEM;

	priv->fs.ptp_fs = ptp_fs;
	return 0;
}

void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return;

	mlx5e_ptp_rx_unset_fs(priv);
	kfree(ptp_fs);
}

int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv);
	return 0;
}