hns_roce_cmd.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dmapool.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"

#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32

static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mbox_msg *mbox_msg)
{
	return hr_dev->hw->post_mbox(hr_dev, mbox_msg);
}

/* Must be called with "poll_sem" held. */
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mbox_msg *mbox_msg)
{
	int ret;

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
	if (ret) {
		dev_err_ratelimited(hr_dev->dev,
				    "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
				    mbox_msg->cmd, ret);
		return ret;
	}

	return hr_dev->hw->poll_mbox_done(hr_dev);
}

static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
				  struct hns_roce_mbox_msg *mbox_msg)
{
	int ret;

	down(&hr_dev->cmd.poll_sem);
	ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg);
	up(&hr_dev->cmd.poll_sem);

	return ret;
}

/*
 * Completion handler for event-mode mailboxes: match the token against the
 * waiting command context, record the result and wake up the waiter.
 */
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param)
{
	struct hns_roce_cmd_context *context =
		&hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];

	if (unlikely(token != context->token)) {
		dev_err_ratelimited(hr_dev->dev,
				    "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
				    token, context->token);
		return;
	}

	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
	context->out_param = out_param;
	complete(&context->done);
}

static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mbox_msg *mbox_msg)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct hns_roce_cmd_context *context;
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock(&cmd->context_lock);

	/* Grab an idle command context from the circular free list. */
	do {
		context = &cmd->context[cmd->free_head];
		cmd->free_head = context->next;
	} while (context->busy);

	context->busy = 1;
	/* Advance the token so completions from earlier uses of this slot are ignored. */
	context->token += cmd->max_cmds;

	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	mbox_msg->token = context->token;
	ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);
	if (ret) {
		dev_err_ratelimited(dev,
				    "failed to post mailbox 0x%x in event mode, ret = %d.\n",
				    mbox_msg->cmd, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) {
		dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
				    context->token, mbox_msg->cmd);
		ret = -EBUSY;
		goto out;
	}

	ret = context->result;
	if (ret)
		dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
				    context->token, mbox_msg->cmd, ret);

out:
	context->busy = 0;
	return ret;
}

static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
				  struct hns_roce_mbox_msg *mbox_msg)
{
	int ret;

	down(&hr_dev->cmd.event_sem);
	ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg);
	up(&hr_dev->cmd.event_sem);

	return ret;
}

int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      u8 cmd, unsigned long tag)
{
	struct hns_roce_mbox_msg mbox_msg = {};
	bool is_busy;

	/* Let the hardware layer short-circuit the command if the mailbox cannot be used now. */
	if (hr_dev->hw->chk_mbox_avail)
		if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
			return is_busy ? -EBUSY : 0;

	mbox_msg.in_param = in_param;
	mbox_msg.out_param = out_param;
	mbox_msg.cmd = cmd;
	mbox_msg.tag = tag;

	if (hr_dev->cmd.use_events) {
		mbox_msg.event_en = 1;

		return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg);
	} else {
		mbox_msg.event_en = 0;
		mbox_msg.token = CMD_POLL_TOKEN;

		return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg);
	}
}

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
	sema_init(&hr_dev->cmd.poll_sem, 1);
	hr_dev->cmd.use_events = 0;
	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev,
					   HNS_ROCE_MAILBOX_SIZE,
					   HNS_ROCE_MAILBOX_SIZE, 0);
	if (!hr_dev->cmd.pool)
		return -ENOMEM;

	return 0;
}

void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
{
	dma_pool_destroy(hr_dev->cmd.pool);
}

int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->context =
		kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
	if (!hr_cmd->context) {
		hr_dev->cmd_mod = 0;
		return -ENOMEM;
	}

	for (i = 0; i < hr_cmd->max_cmds; ++i) {
		hr_cmd->context[i].token = i;
		hr_cmd->context[i].next = i + 1;
		init_completion(&hr_cmd->context[i].done);
	}
	hr_cmd->context[hr_cmd->max_cmds - 1].next = 0;
	hr_cmd->free_head = 0;

	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
	spin_lock_init(&hr_cmd->context_lock);

	hr_cmd->use_events = 1;

	return 0;
}

void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;

	kfree(hr_cmd->context);
	hr_cmd->use_events = 0;
}

struct hns_roce_cmd_mailbox *
hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf =
		dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}

void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

int hns_roce_create_hw_ctx(struct hns_roce_dev *dev,
			   struct hns_roce_cmd_mailbox *mailbox,
			   u8 cmd, unsigned long idx)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx);
}

int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx)
{
	return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx);
}
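
/*
 * Illustrative sketch, not part of the upstream file: the expected calling
 * pattern for the mailbox helpers above is to allocate a mailbox from the
 * driver's DMA pool, fill its buffer with a software-built hardware context,
 * post it via hns_roce_create_hw_ctx() and then free the mailbox.  The
 * function name, opcode and context layout here are assumed placeholders
 * supplied by the caller, not definitions from this file.
 */
static int example_write_hw_ctx(struct hns_roce_dev *hr_dev, const void *ctx,
				size_t ctx_size, u8 opcode, unsigned long idx)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Mailbox buffer comes from the driver's DMA pool (hr_dev->cmd.pool). */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* Copy the software-built context into the DMA-able buffer. */
	memcpy(mailbox->buf, ctx, ctx_size);

	/* The buffer's DMA address is passed to the firmware as in_param. */
	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, opcode, idx);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}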