cq.c (7019B)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
#include "lib/eq.h"

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
{
        unsigned long flags;
        unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
        struct mlx5_eq_tasklet *ctx = from_tasklet(ctx, t, task);
        struct mlx5_core_cq *mcq;
        struct mlx5_core_cq *temp;

        spin_lock_irqsave(&ctx->lock, flags);
        list_splice_tail_init(&ctx->list, &ctx->process_list);
        spin_unlock_irqrestore(&ctx->lock, flags);

        list_for_each_entry_safe(mcq, temp, &ctx->process_list,
                                 tasklet_ctx.list) {
                list_del_init(&mcq->tasklet_ctx.list);
                mcq->tasklet_ctx.comp(mcq, NULL);
                mlx5_cq_put(mcq);
                if (time_after(jiffies, end))
                        break;
        }

        if (!list_empty(&ctx->process_list))
                tasklet_schedule(&ctx->task);
}

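The tasklet above drains a per-EQ list of pending CQs with a roughly 2 ms budget (TASKLET_MAX_TIME_JIFFIES) and reschedules itself if work remains; each entry's comp callback is what ultimately notifies the CQ owner. mlx5_create_cq() later in this file installs mlx5_add_cq_to_tasklet() as that callback only when the owner leaves cq->comp NULL, so a consumer that wants completions delivered straight from the EQ handler assigns its own callback before creating the CQ. A minimal consumer-side sketch follows; struct my_cq and my_cq_comp are hypothetical names, not part of the driver.

#include <linux/netdevice.h>
#include <linux/mlx5/cq.h>

/* Hypothetical consumer-side state embedding the core CQ. */
struct my_cq {
        struct mlx5_core_cq mcq;
        struct napi_struct napi;
};

/* Runs either from the EQ interrupt handler or from mlx5_cq_tasklet_cb()
 * above, so it must not sleep; defer the real work, e.g. to NAPI.
 */
static void my_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
        struct my_cq *cq = container_of(mcq, struct my_cq, mcq);

        napi_schedule(&cq->napi);
}

The owner would set cq->mcq.comp = my_cq_comp before calling mlx5_core_create_cq(); leaving it NULL selects the tasklet-deferred path implemented by mlx5_add_cq_to_tasklet() below.
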
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
                                   struct mlx5_eqe *eqe)
{
        unsigned long flags;
        struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

        spin_lock_irqsave(&tasklet_ctx->lock, flags);
        /* If CQ migration between EQs is ever implemented, note that this
         * point must be synchronized: while a CQ is being migrated,
         * completions could still arrive on the old EQ.
         */
        if (list_empty_careful(&cq->tasklet_ctx.list)) {
                mlx5_cq_hold(cq);
                list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
        }
        spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

/* Callers must verify outbox status in case of error */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                   u32 *in, int inlen, u32 *out, int outlen)
{
        int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
                           c_eqn_or_apu_element);
        u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
        struct mlx5_eq_comp *eq;
        int err;

        eq = mlx5_eqn2comp_eq(dev, eqn);
        if (IS_ERR(eq))
                return PTR_ERR(eq);

        memset(out, 0, outlen);
        MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
        err = mlx5_cmd_do(dev, in, inlen, out, outlen);
        if (err)
                return err;

        cq->cqn = MLX5_GET(create_cq_out, out, cqn);
        cq->cons_index = 0;
        cq->arm_sn = 0;
        cq->eq = eq;
        cq->uid = MLX5_GET(create_cq_in, in, uid);
        refcount_set(&cq->refcount, 1);
        init_completion(&cq->free);
        if (!cq->comp)
                cq->comp = mlx5_add_cq_to_tasklet;
        /* assuming CQ will be deleted before the EQ */
        cq->tasklet_ctx.priv = &eq->tasklet_ctx;
        INIT_LIST_HEAD(&cq->tasklet_ctx.list);

        /* Add to comp EQ CQ tree to recv comp events */
        err = mlx5_eq_add_cq(&eq->core, cq);
        if (err)
                goto err_cmd;

        /* Add to async EQ CQ tree to recv async events */
        err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq);
        if (err)
                goto err_cq_add;

        cq->pid = current->pid;
        err = mlx5_debug_cq_add(dev, cq);
        if (err)
                mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
                              cq->cqn);

        cq->uar = dev->priv.uar;
        cq->irqn = eq->core.irqn;

        return 0;

err_cq_add:
        mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
        MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, din, uid, cq->uid);
        mlx5_cmd_exec_in(dev, destroy_cq, din);
        return err;
}
EXPORT_SYMBOL(mlx5_create_cq);

/* outbox is checked and the error value is normalized */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        u32 *in, int inlen, u32 *out, int outlen)
{
        int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);

        return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_core_create_cq);

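For context, here is a sketch of how a caller might build the create_cq_in mailbox consumed by mlx5_core_create_cq(). It is illustrative only: my_create_cq is a hypothetical helper, the CQ buffer allocation and the filling of the pas[] page-address array are elided, eqn is assumed to have been looked up from the chosen completion vector elsewhere, and it reuses the hypothetical my_cq_comp handler from the earlier sketch.

#include <linux/slab.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>

/* Hypothetical wrapper: create a CQ of 2^log_cq_size entries whose
 * buffer spans npages pages, bound to completion EQ "eqn".
 */
static int my_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        int eqn, int npages, u8 log_cq_size)
{
        u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
        void *cqc;
        int inlen;
        u32 *in;
        int err;

        /* Fixed part of the command plus one 64-bit PA per buffer page. */
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        MLX5_SET(cqc, cqc, uar_page, dev->priv.uar->index);
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
        /* ... write the CQ buffer page addresses into the pas[] array ... */

        cq->comp = my_cq_comp;  /* see the sketch above; NULL keeps the tasklet path */
        err = mlx5_core_create_cq(dev, cq, in, inlen, out, sizeof(out));
        kvfree(in);
        return err;
}
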
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
        int err;

        mlx5_debug_cq_remove(dev, cq);

        mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
        mlx5_eq_del_cq(&cq->eq->core, cq);

        MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, in, uid, cq->uid);
        err = mlx5_cmd_exec_in(dev, destroy_cq, in);
        if (err)
                return err;

        synchronize_irq(cq->irqn);
        mlx5_cq_put(cq);
        wait_for_completion(&cq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                       u32 *out)
{
        u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {};

        MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
        MLX5_SET(query_cq_in, in, cqn, cq->cqn);
        return mlx5_cmd_exec_inout(dev, query_cq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {};

        MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
        MLX5_SET(modify_cq_in, in, uid, cq->uid);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
                                   struct mlx5_core_cq *cq,
                                   u16 cq_period,
                                   u16 cq_max_count)
{
        u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
        void *cqc;

        MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
        cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, cq_period, cq_period);
        MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
                 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

        return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);

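To round out the picture, a hedged usage sketch of the query and moderation helpers: reading the current coalescing settings back out of the CQ context returned by QUERY_CQ, then requesting new ones. my_retune_cq is a hypothetical helper, the period/count values are examples only, and support and limits for CQ moderation are device-dependent.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>

/* Hypothetical helper: dump cq_period/cq_max_count from the queried CQ
 * context, then ask the device to coalesce up to 64 completions or for
 * up to 16 time units, whichever comes first (example values).
 */
static int my_retune_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
        void *cqc;
        int err;

        err = mlx5_core_query_cq(dev, cq, out);
        if (err)
                return err;

        cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
        pr_debug("CQ 0x%x: period %u, max_count %u\n", cq->cqn,
                 MLX5_GET(cqc, cqc, cq_period),
                 MLX5_GET(cqc, cqc, cq_max_count));

        return mlx5_core_modify_cq_moderation(dev, cq, 16, 64);
}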