cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mthca_cmd.c (58656B)


      1/*
      2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
      3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
      4 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
      5 *
      6 * This software is available to you under a choice of one of two
      7 * licenses.  You may choose to be licensed under the terms of the GNU
      8 * General Public License (GPL) Version 2, available from the file
      9 * COPYING in the main directory of this source tree, or the
     10 * OpenIB.org BSD license below:
     11 *
     12 *     Redistribution and use in source and binary forms, with or
     13 *     without modification, are permitted provided that the following
     14 *     conditions are met:
     15 *
     16 *      - Redistributions of source code must retain the above
     17 *        copyright notice, this list of conditions and the following
     18 *        disclaimer.
     19 *
     20 *      - Redistributions in binary form must reproduce the above
     21 *        copyright notice, this list of conditions and the following
     22 *        disclaimer in the documentation and/or other materials
     23 *        provided with the distribution.
     24 *
     25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     32 * SOFTWARE.
     33 */
     34
     35#include <linux/completion.h>
     36#include <linux/pci.h>
     37#include <linux/errno.h>
     38#include <linux/sched.h>
     39#include <linux/module.h>
     40#include <linux/slab.h>
     41#include <asm/io.h>
     42#include <rdma/ib_mad.h>
     43
     44#include "mthca_dev.h"
     45#include "mthca_config_reg.h"
     46#include "mthca_cmd.h"
     47#include "mthca_memfree.h"
     48
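/*
 * Fixed token used for all polled commands: poll_sem serializes them, so
 * only one is ever outstanding and completion is detected by polling the
 * GO bit rather than by matching tokens.
 */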
     49#define CMD_POLL_TOKEN 0xffff
     50
     51enum {
     52	HCR_IN_PARAM_OFFSET    = 0x00,
     53	HCR_IN_MODIFIER_OFFSET = 0x08,
     54	HCR_OUT_PARAM_OFFSET   = 0x0c,
     55	HCR_TOKEN_OFFSET       = 0x14,
     56	HCR_STATUS_OFFSET      = 0x18,
     57
     58	HCR_OPMOD_SHIFT        = 12,
     59	HCA_E_BIT              = 22,
     60	HCR_GO_BIT             = 23
     61};
     62
     63enum {
     64	/* initialization and general commands */
     65	CMD_SYS_EN          = 0x1,
     66	CMD_SYS_DIS         = 0x2,
     67	CMD_MAP_FA          = 0xfff,
     68	CMD_UNMAP_FA        = 0xffe,
     69	CMD_RUN_FW          = 0xff6,
     70	CMD_MOD_STAT_CFG    = 0x34,
     71	CMD_QUERY_DEV_LIM   = 0x3,
     72	CMD_QUERY_FW        = 0x4,
     73	CMD_ENABLE_LAM      = 0xff8,
     74	CMD_DISABLE_LAM     = 0xff7,
     75	CMD_QUERY_DDR       = 0x5,
     76	CMD_QUERY_ADAPTER   = 0x6,
     77	CMD_INIT_HCA        = 0x7,
     78	CMD_CLOSE_HCA       = 0x8,
     79	CMD_INIT_IB         = 0x9,
     80	CMD_CLOSE_IB        = 0xa,
     81	CMD_QUERY_HCA       = 0xb,
     82	CMD_SET_IB          = 0xc,
     83	CMD_ACCESS_DDR      = 0x2e,
     84	CMD_MAP_ICM         = 0xffa,
     85	CMD_UNMAP_ICM       = 0xff9,
     86	CMD_MAP_ICM_AUX     = 0xffc,
     87	CMD_UNMAP_ICM_AUX   = 0xffb,
     88	CMD_SET_ICM_SIZE    = 0xffd,
     89
     90	/* TPT commands */
     91	CMD_SW2HW_MPT 	    = 0xd,
     92	CMD_QUERY_MPT 	    = 0xe,
     93	CMD_HW2SW_MPT 	    = 0xf,
     94	CMD_READ_MTT        = 0x10,
     95	CMD_WRITE_MTT       = 0x11,
     96	CMD_SYNC_TPT        = 0x2f,
     97
     98	/* EQ commands */
     99	CMD_MAP_EQ          = 0x12,
    100	CMD_SW2HW_EQ 	    = 0x13,
    101	CMD_HW2SW_EQ 	    = 0x14,
    102	CMD_QUERY_EQ        = 0x15,
    103
    104	/* CQ commands */
    105	CMD_SW2HW_CQ 	    = 0x16,
    106	CMD_HW2SW_CQ 	    = 0x17,
    107	CMD_QUERY_CQ 	    = 0x18,
    108	CMD_RESIZE_CQ       = 0x2c,
    109
    110	/* SRQ commands */
    111	CMD_SW2HW_SRQ 	    = 0x35,
    112	CMD_HW2SW_SRQ 	    = 0x36,
    113	CMD_QUERY_SRQ       = 0x37,
    114	CMD_ARM_SRQ         = 0x40,
    115
    116	/* QP/EE commands */
    117	CMD_RST2INIT_QPEE   = 0x19,
    118	CMD_INIT2RTR_QPEE   = 0x1a,
    119	CMD_RTR2RTS_QPEE    = 0x1b,
    120	CMD_RTS2RTS_QPEE    = 0x1c,
    121	CMD_SQERR2RTS_QPEE  = 0x1d,
    122	CMD_2ERR_QPEE       = 0x1e,
    123	CMD_RTS2SQD_QPEE    = 0x1f,
    124	CMD_SQD2SQD_QPEE    = 0x38,
    125	CMD_SQD2RTS_QPEE    = 0x20,
    126	CMD_ERR2RST_QPEE    = 0x21,
    127	CMD_QUERY_QPEE      = 0x22,
    128	CMD_INIT2INIT_QPEE  = 0x2d,
    129	CMD_SUSPEND_QPEE    = 0x32,
    130	CMD_UNSUSPEND_QPEE  = 0x33,
    131	/* special QPs and management commands */
    132	CMD_CONF_SPECIAL_QP = 0x23,
    133	CMD_MAD_IFC         = 0x24,
    134
    135	/* multicast commands */
    136	CMD_READ_MGM        = 0x25,
    137	CMD_WRITE_MGM       = 0x26,
    138	CMD_MGID_HASH       = 0x27,
    139
    140	/* miscellaneous commands */
    141	CMD_DIAG_RPRT       = 0x30,
    142	CMD_NOP             = 0x31,
    143
    144	/* debug commands */
    145	CMD_QUERY_DEBUG_MSG = 0x2a,
    146	CMD_SET_DEBUG_MSG   = 0x2b,
    147};
    148
    149/*
    150 * According to Mellanox code, FW may be starved and never complete
    151 * commands.  So we can't use strict timeouts described in PRM -- we
    152 * just arbitrarily select 60 seconds for now.
    153 */
    154#if 0
    155/*
    156 * Round up and add 1 to make sure we get the full wait time (since we
    157 * will be starting in the middle of a jiffy)
    158 */
    159enum {
    160	CMD_TIME_CLASS_A = (HZ + 999) / 1000 + 1,
    161	CMD_TIME_CLASS_B = (HZ +  99) /  100 + 1,
    162	CMD_TIME_CLASS_C = (HZ +   9) /   10 + 1,
    163	CMD_TIME_CLASS_D = 60 * HZ
    164};
    165#else
    166enum {
    167	CMD_TIME_CLASS_A = 60 * HZ,
    168	CMD_TIME_CLASS_B = 60 * HZ,
    169	CMD_TIME_CLASS_C = 60 * HZ,
    170	CMD_TIME_CLASS_D = 60 * HZ
    171};
    172#endif
    173
    174enum {
    175	GO_BIT_TIMEOUT = HZ * 10
    176};
    177
    178struct mthca_cmd_context {
    179	struct completion done;
    180	int               result;
    181	int               next;
    182	u64               out_param;
    183	u16               token;
    184	u8                status;
    185};
    186
    187static int fw_cmd_doorbell = 0;
    188module_param(fw_cmd_doorbell, int, 0644);
    189MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
    190		 "(and supported by FW)");
    191
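/*
 * Returns nonzero while the GO bit in the HCR status word is still set,
 * i.e. the HCA still owns the command interface and software must not
 * post a new command yet.
 */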
    192static inline int go_bit(struct mthca_dev *dev)
    193{
    194	return readl(dev->hcr + HCR_STATUS_OFFSET) &
    195		swab32(1 << HCR_GO_BIT);
    196}
    197
    198static void mthca_cmd_post_dbell(struct mthca_dev *dev,
    199				 u64 in_param,
    200				 u64 out_param,
    201				 u32 in_modifier,
    202				 u8 op_modifier,
    203				 u16 op,
    204				 u16 token)
    205{
    206	void __iomem *ptr = dev->cmd.dbell_map;
    207	u16 *offs = dev->cmd.dbell_offsets;
    208
    209	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),           ptr + offs[0]);
    210	wmb();
    211	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  ptr + offs[1]);
    212	wmb();
    213	__raw_writel((__force u32) cpu_to_be32(in_modifier),              ptr + offs[2]);
    214	wmb();
    215	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),          ptr + offs[3]);
    216	wmb();
    217	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), ptr + offs[4]);
    218	wmb();
    219	__raw_writel((__force u32) cpu_to_be32(token << 16),              ptr + offs[5]);
    220	wmb();
    221	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
    222					       (1 << HCA_E_BIT)                 |
    223					       (op_modifier << HCR_OPMOD_SHIFT) |
    224						op),			  ptr + offs[6]);
    225	wmb();
    226	__raw_writel((__force u32) 0,                                     ptr + offs[7]);
    227	wmb();
    228}
    229
    230static int mthca_cmd_post_hcr(struct mthca_dev *dev,
    231			      u64 in_param,
    232			      u64 out_param,
    233			      u32 in_modifier,
    234			      u8 op_modifier,
    235			      u16 op,
    236			      u16 token,
    237			      int event)
    238{
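	/*
	 * With command events enabled several commands can be in flight, so
	 * the HCR may still be owned by a previously posted command; yield
	 * the CPU until the GO bit clears or GO_BIT_TIMEOUT expires.
	 */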
    239	if (event) {
    240		unsigned long end = jiffies + GO_BIT_TIMEOUT;
    241
    242		while (go_bit(dev) && time_before(jiffies, end)) {
    243			set_current_state(TASK_RUNNING);
    244			schedule();
    245		}
    246	}
    247
    248	if (go_bit(dev))
    249		return -EAGAIN;
    250
    251	/*
    252	 * We use writel (instead of something like memcpy_toio)
    253	 * because writes of less than 32 bits to the HCR don't work
    254	 * (and some architectures such as ia64 implement memcpy_toio
    255	 * in terms of writeb).
    256	 */
    257	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),           dev->hcr + 0 * 4);
    258	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  dev->hcr + 1 * 4);
    259	__raw_writel((__force u32) cpu_to_be32(in_modifier),              dev->hcr + 2 * 4);
    260	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),          dev->hcr + 3 * 4);
    261	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
    262	__raw_writel((__force u32) cpu_to_be32(token << 16),              dev->hcr + 5 * 4);
    263
    264	/* __raw_writel may not order writes. */
    265	wmb();
    266
    267	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
    268					       (event ? (1 << HCA_E_BIT) : 0)   |
    269					       (op_modifier << HCR_OPMOD_SHIFT) |
    270					       op),                       dev->hcr + 6 * 4);
    271
    272	return 0;
    273}
    274
    275static int mthca_cmd_post(struct mthca_dev *dev,
    276			  u64 in_param,
    277			  u64 out_param,
    278			  u32 in_modifier,
    279			  u8 op_modifier,
    280			  u16 op,
    281			  u16 token,
    282			  int event)
    283{
    284	int err = 0;
    285
    286	mutex_lock(&dev->cmd.hcr_mutex);
    287
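	/*
	 * Doorbell posting is only used for event-driven commands, and only
	 * when the firmware advertises support and the fw_cmd_doorbell
	 * module parameter is set; otherwise post through the HCR.
	 */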
    288	if (event && dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS && fw_cmd_doorbell)
    289		mthca_cmd_post_dbell(dev, in_param, out_param, in_modifier,
    290					   op_modifier, op, token);
    291	else
    292		err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
    293					 op_modifier, op, token, event);
    294
    295	mutex_unlock(&dev->cmd.hcr_mutex);
    296	return err;
    297}
    298
    299
    300static int mthca_status_to_errno(u8 status)
    301{
    302	static const int trans_table[] = {
    303		[MTHCA_CMD_STAT_INTERNAL_ERR]   = -EIO,
    304		[MTHCA_CMD_STAT_BAD_OP]         = -EPERM,
    305		[MTHCA_CMD_STAT_BAD_PARAM]      = -EINVAL,
    306		[MTHCA_CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
    307		[MTHCA_CMD_STAT_BAD_RESOURCE]   = -EBADF,
    308		[MTHCA_CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
    309		[MTHCA_CMD_STAT_DDR_MEM_ERR]    = -ENOMEM,
    310		[MTHCA_CMD_STAT_EXCEED_LIM]     = -ENOMEM,
    311		[MTHCA_CMD_STAT_BAD_RES_STATE]  = -EBADF,
    312		[MTHCA_CMD_STAT_BAD_INDEX]      = -EBADF,
    313		[MTHCA_CMD_STAT_BAD_NVMEM]      = -EFAULT,
    314		[MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL,
    315		[MTHCA_CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
    316		[MTHCA_CMD_STAT_REG_BOUND]      = -EBUSY,
    317		[MTHCA_CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
    318		[MTHCA_CMD_STAT_BAD_PKT]        = -EBADMSG,
    319		[MTHCA_CMD_STAT_BAD_SIZE]       = -ENOMEM,
    320	};
    321
    322	if (status >= ARRAY_SIZE(trans_table) ||
    323			(status != MTHCA_CMD_STAT_OK
    324			 && trans_table[status] == 0))
    325		return -EINVAL;
    326
    327	return trans_table[status];
    328}
    329
    330
    331static int mthca_cmd_poll(struct mthca_dev *dev,
    332			  u64 in_param,
    333			  u64 *out_param,
    334			  int out_is_imm,
    335			  u32 in_modifier,
    336			  u8 op_modifier,
    337			  u16 op,
    338			  unsigned long timeout)
    339{
    340	int err = 0;
    341	unsigned long end;
    342	u8 status;
    343
    344	down(&dev->cmd.poll_sem);
    345
    346	err = mthca_cmd_post(dev, in_param,
    347			     out_param ? *out_param : 0,
    348			     in_modifier, op_modifier,
    349			     op, CMD_POLL_TOKEN, 0);
    350	if (err)
    351		goto out;
    352
    353	end = timeout + jiffies;
    354	while (go_bit(dev) && time_before(jiffies, end)) {
    355		set_current_state(TASK_RUNNING);
    356		schedule();
    357	}
    358
    359	if (go_bit(dev)) {
    360		err = -EBUSY;
    361		goto out;
    362	}
    363
    364	if (out_is_imm && out_param) {
    365		*out_param =
    366			(u64) be32_to_cpu((__force __be32)
    367					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
    368			(u64) be32_to_cpu((__force __be32)
    369					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
    370	} else if (out_is_imm) {
    371		err = -EINVAL;
    372		goto out;
    373	}
    374
    375	status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
    376	if (status) {
    377		mthca_dbg(dev, "Command %02x completed with status %02x\n",
    378			  op, status);
    379		err = mthca_status_to_errno(status);
    380	}
    381
    382out:
    383	up(&dev->cmd.poll_sem);
    384	return err;
    385}
    386
    387void mthca_cmd_event(struct mthca_dev *dev,
    388		     u16 token,
    389		     u8  status,
    390		     u64 out_param)
    391{
    392	struct mthca_cmd_context *context =
    393		&dev->cmd.context[token & dev->cmd.token_mask];
    394
    395	/* previously timed out command completing at long last */
    396	if (token != context->token)
    397		return;
    398
    399	context->result    = 0;
    400	context->status    = status;
    401	context->out_param = out_param;
    402
    403	complete(&context->done);
    404}
    405
    406static int mthca_cmd_wait(struct mthca_dev *dev,
    407			  u64 in_param,
    408			  u64 *out_param,
    409			  int out_is_imm,
    410			  u32 in_modifier,
    411			  u8 op_modifier,
    412			  u16 op,
    413			  unsigned long timeout)
    414{
    415	int err = 0;
    416	struct mthca_cmd_context *context;
    417
    418	down(&dev->cmd.event_sem);
    419
    420	spin_lock(&dev->cmd.context_lock);
    421	BUG_ON(dev->cmd.free_head < 0);
    422	context = &dev->cmd.context[dev->cmd.free_head];
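	/*
	 * Bump the token bits above token_mask each time this context slot is
	 * reused, so a late completion of an earlier (timed out) command is
	 * recognized and dropped by mthca_cmd_event().
	 */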
    423	context->token += dev->cmd.token_mask + 1;
    424	dev->cmd.free_head = context->next;
    425	spin_unlock(&dev->cmd.context_lock);
    426
    427	init_completion(&context->done);
    428
    429	err = mthca_cmd_post(dev, in_param,
    430			     out_param ? *out_param : 0,
    431			     in_modifier, op_modifier,
    432			     op, context->token, 1);
    433	if (err)
    434		goto out;
    435
    436	if (!wait_for_completion_timeout(&context->done, timeout)) {
    437		err = -EBUSY;
    438		goto out;
    439	}
    440
    441	err = context->result;
    442	if (err)
    443		goto out;
    444
    445	if (context->status) {
    446		mthca_dbg(dev, "Command %02x completed with status %02x\n",
    447			  op, context->status);
    448		err = mthca_status_to_errno(context->status);
    449	}
    450
    451	if (out_is_imm && out_param) {
    452		*out_param = context->out_param;
    453	} else if (out_is_imm) {
    454		err = -EINVAL;
    455		goto out;
    456	}
    457
    458out:
    459	spin_lock(&dev->cmd.context_lock);
    460	context->next = dev->cmd.free_head;
    461	dev->cmd.free_head = context - dev->cmd.context;
    462	spin_unlock(&dev->cmd.context_lock);
    463
    464	up(&dev->cmd.event_sem);
    465	return err;
    466}
    467
    468/* Invoke a command with an output mailbox */
    469static int mthca_cmd_box(struct mthca_dev *dev,
    470			 u64 in_param,
    471			 u64 out_param,
    472			 u32 in_modifier,
    473			 u8 op_modifier,
    474			 u16 op,
    475			 unsigned long timeout)
    476{
    477	if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
    478		return mthca_cmd_wait(dev, in_param, &out_param, 0,
    479				      in_modifier, op_modifier, op,
    480				      timeout);
    481	else
    482		return mthca_cmd_poll(dev, in_param, &out_param, 0,
    483				      in_modifier, op_modifier, op,
    484				      timeout);
    485}
    486
    487/* Invoke a command with no output parameter */
    488static int mthca_cmd(struct mthca_dev *dev,
    489		     u64 in_param,
    490		     u32 in_modifier,
    491		     u8 op_modifier,
    492		     u16 op,
    493		     unsigned long timeout)
    494{
    495	return mthca_cmd_box(dev, in_param, 0, in_modifier,
    496			     op_modifier, op, timeout);
    497}
    498
    499/*
    500 * Invoke a command with an immediate output parameter (and copy the
    501 * output into the caller's out_param pointer after the command
    502 * executes).
    503 */
    504static int mthca_cmd_imm(struct mthca_dev *dev,
    505			 u64 in_param,
    506			 u64 *out_param,
    507			 u32 in_modifier,
    508			 u8 op_modifier,
    509			 u16 op,
    510			 unsigned long timeout)
    511{
    512	if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
    513		return mthca_cmd_wait(dev, in_param, out_param, 1,
    514				      in_modifier, op_modifier, op,
    515				      timeout);
    516	else
    517		return mthca_cmd_poll(dev, in_param, out_param, 1,
    518				      in_modifier, op_modifier, op,
    519				      timeout);
    520}
    521
    522int mthca_cmd_init(struct mthca_dev *dev)
    523{
    524	mutex_init(&dev->cmd.hcr_mutex);
    525	sema_init(&dev->cmd.poll_sem, 1);
    526	dev->cmd.flags = 0;
    527
    528	dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
    529			   MTHCA_HCR_SIZE);
    530	if (!dev->hcr) {
    531		mthca_err(dev, "Couldn't map command register.");
    532		return -ENOMEM;
    533	}
    534
    535	dev->cmd.pool = dma_pool_create("mthca_cmd", &dev->pdev->dev,
    536					MTHCA_MAILBOX_SIZE,
    537					MTHCA_MAILBOX_SIZE, 0);
    538	if (!dev->cmd.pool) {
    539		iounmap(dev->hcr);
    540		return -ENOMEM;
    541	}
    542
    543	return 0;
    544}
    545
    546void mthca_cmd_cleanup(struct mthca_dev *dev)
    547{
    548	dma_pool_destroy(dev->cmd.pool);
    549	iounmap(dev->hcr);
    550	if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS)
    551		iounmap(dev->cmd.dbell_map);
    552}
    553
    554/*
    555 * Switch to using events to issue FW commands (should be called after
    556 * event queue to command events has been initialized).
    557 */
    558int mthca_cmd_use_events(struct mthca_dev *dev)
    559{
    560	int i;
    561
    562	dev->cmd.context = kmalloc_array(dev->cmd.max_cmds,
    563					 sizeof(struct mthca_cmd_context),
    564					 GFP_KERNEL);
    565	if (!dev->cmd.context)
    566		return -ENOMEM;
    567
    568	for (i = 0; i < dev->cmd.max_cmds; ++i) {
    569		dev->cmd.context[i].token = i;
    570		dev->cmd.context[i].next = i + 1;
    571	}
    572
    573	dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
    574	dev->cmd.free_head = 0;
    575
    576	sema_init(&dev->cmd.event_sem, dev->cmd.max_cmds);
    577	spin_lock_init(&dev->cmd.context_lock);
    578
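	/*
	 * Round max_cmds up to a power of two and subtract one to get a mask
	 * that maps a command token to its slot in the context array.
	 */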
    579	for (dev->cmd.token_mask = 1;
    580	     dev->cmd.token_mask < dev->cmd.max_cmds;
    581	     dev->cmd.token_mask <<= 1)
    582		; /* nothing */
    583	--dev->cmd.token_mask;
    584
    585	dev->cmd.flags |= MTHCA_CMD_USE_EVENTS;
    586
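	/*
	 * Hold poll_sem for as long as we are in event mode so that no polled
	 * command can be issued; it is released again by
	 * mthca_cmd_use_polling().
	 */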
    587	down(&dev->cmd.poll_sem);
    588
    589	return 0;
    590}
    591
    592/*
    593 * Switch back to polling (used when shutting down the device)
    594 */
    595void mthca_cmd_use_polling(struct mthca_dev *dev)
    596{
    597	int i;
    598
    599	dev->cmd.flags &= ~MTHCA_CMD_USE_EVENTS;
    600
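	/*
	 * Acquire every event_sem slot so that all outstanding event-mode
	 * commands have completed before the context array is freed.
	 */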
    601	for (i = 0; i < dev->cmd.max_cmds; ++i)
    602		down(&dev->cmd.event_sem);
    603
    604	kfree(dev->cmd.context);
    605
    606	up(&dev->cmd.poll_sem);
    607}
    608
    609struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
    610					  gfp_t gfp_mask)
    611{
    612	struct mthca_mailbox *mailbox;
    613
    614	mailbox = kmalloc(sizeof *mailbox, gfp_mask);
    615	if (!mailbox)
    616		return ERR_PTR(-ENOMEM);
    617
    618	mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
    619	if (!mailbox->buf) {
    620		kfree(mailbox);
    621		return ERR_PTR(-ENOMEM);
    622	}
    623
    624	return mailbox;
    625}
    626
    627void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
    628{
    629	if (!mailbox)
    630		return;
    631
    632	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
    633	kfree(mailbox);
    634}
    635
    636int mthca_SYS_EN(struct mthca_dev *dev)
    637{
    638	u64 out;
    639	int ret;
    640
    641	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
    642
    643	if (ret == -ENOMEM)
    644		mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
    645			   "sladdr=%d, SPD source=%s\n",
    646			   (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
    647			   (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM");
    648
    649	return ret;
    650}
    651
    652int mthca_SYS_DIS(struct mthca_dev *dev)
    653{
    654	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
    655}
    656
    657static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
    658			 u64 virt)
    659{
    660	struct mthca_mailbox *mailbox;
    661	struct mthca_icm_iter iter;
    662	__be64 *pages;
    663	int lg;
    664	int nent = 0;
    665	int i;
    666	int err = 0;
    667	int ts = 0, tc = 0;
    668
    669	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
    670	if (IS_ERR(mailbox))
    671		return PTR_ERR(mailbox);
    672	memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
    673	pages = mailbox->buf;
    674
    675	for (mthca_icm_first(icm, &iter);
    676	     !mthca_icm_last(&iter);
    677	     mthca_icm_next(&iter)) {
    678		/*
    679		 * We have to pass pages that are aligned to their
    680		 * size, so find the least significant 1 in the
    681		 * address or size and use that as our log2 size.
    682		 */
    683		lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
    684		if (lg < MTHCA_ICM_PAGE_SHIFT) {
    685			mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
    686				   MTHCA_ICM_PAGE_SIZE,
    687				   (unsigned long long) mthca_icm_addr(&iter),
    688				   mthca_icm_size(&iter));
    689			err = -EINVAL;
    690			goto out;
    691		}
    692		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
    693			if (virt != -1) {
    694				pages[nent * 2] = cpu_to_be64(virt);
    695				virt += 1ULL << lg;
    696			}
    697
    698			pages[nent * 2 + 1] =
    699				cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
    700					    (lg - MTHCA_ICM_PAGE_SHIFT));
    701			ts += 1 << (lg - 10);
    702			++tc;
    703
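			/*
			 * Each mailbox entry is a 16-byte (virtual address,
			 * physical address | log size) pair; flush the
			 * mailbox to firmware once it fills up.
			 */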
    704			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
    705				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
    706						CMD_TIME_CLASS_B);
    707				if (err)
    708					goto out;
    709				nent = 0;
    710			}
    711		}
    712	}
    713
    714	if (nent)
    715		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
    716				CMD_TIME_CLASS_B);
    717
    718	switch (op) {
    719	case CMD_MAP_FA:
    720		mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
    721		break;
    722	case CMD_MAP_ICM_AUX:
    723		mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
    724		break;
    725	case CMD_MAP_ICM:
    726		mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
    727			  tc, ts, (unsigned long long) virt - (ts << 10));
    728		break;
    729	}
    730
    731out:
    732	mthca_free_mailbox(dev, mailbox);
    733	return err;
    734}
    735
    736int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm)
    737{
    738	return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1);
    739}
    740
    741int mthca_UNMAP_FA(struct mthca_dev *dev)
    742{
    743	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B);
    744}
    745
    746int mthca_RUN_FW(struct mthca_dev *dev)
    747{
    748	return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A);
    749}
    750
    751static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
    752{
    753	phys_addr_t addr;
    754	u16 max_off = 0;
    755	int i;
    756
    757	for (i = 0; i < 8; ++i)
    758		max_off = max(max_off, dev->cmd.dbell_offsets[i]);
    759
    760	if ((base & PAGE_MASK) != ((base + max_off) & PAGE_MASK)) {
    761		mthca_warn(dev, "Firmware doorbell region at 0x%016llx, "
    762			   "length 0x%x crosses a page boundary\n",
    763			   (unsigned long long) base, max_off);
    764		return;
    765	}
    766
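	/*
	 * Translate the firmware-reported doorbell address into a CPU
	 * physical address: keep only the offset within PCI BAR 2 and add it
	 * to that BAR's physical start before mapping.
	 */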
    767	addr = pci_resource_start(dev->pdev, 2) +
    768		((pci_resource_len(dev->pdev, 2) - 1) & base);
    769	dev->cmd.dbell_map = ioremap(addr, max_off + sizeof(u32));
    770	if (!dev->cmd.dbell_map)
    771		return;
    772
    773	dev->cmd.flags |= MTHCA_CMD_POST_DOORBELLS;
    774	mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
    775}
    776
    777int mthca_QUERY_FW(struct mthca_dev *dev)
    778{
    779	struct mthca_mailbox *mailbox;
    780	u32 *outbox;
    781	u64 base;
    782	u32 tmp;
    783	int err = 0;
    784	u8 lg;
    785	int i;
    786
    787#define QUERY_FW_OUT_SIZE             0x100
    788#define QUERY_FW_VER_OFFSET            0x00
    789#define QUERY_FW_MAX_CMD_OFFSET        0x0f
    790#define QUERY_FW_ERR_START_OFFSET      0x30
    791#define QUERY_FW_ERR_SIZE_OFFSET       0x38
    792
    793#define QUERY_FW_CMD_DB_EN_OFFSET      0x10
    794#define QUERY_FW_CMD_DB_OFFSET         0x50
    795#define QUERY_FW_CMD_DB_BASE           0x60
    796
    797#define QUERY_FW_START_OFFSET          0x20
    798#define QUERY_FW_END_OFFSET            0x28
    799
    800#define QUERY_FW_SIZE_OFFSET           0x00
    801#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
    802#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
    803#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
    804
    805	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
    806	if (IS_ERR(mailbox))
    807		return PTR_ERR(mailbox);
    808	outbox = mailbox->buf;
    809
    810	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
    811			    CMD_TIME_CLASS_A);
    812
    813	if (err)
    814		goto out;
    815
    816	MTHCA_GET(dev->fw_ver,   outbox, QUERY_FW_VER_OFFSET);
    817	/*
    818	 * FW subminor version is at more significant bits than minor
    819	 * version, so swap here.
    820	 */
    821	dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
    822		((dev->fw_ver & 0xffff0000ull) >> 16) |
    823		((dev->fw_ver & 0x0000ffffull) << 16);
    824
    825	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
    826	dev->cmd.max_cmds = 1 << lg;
    827
    828	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
    829		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
    830
    831	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
    832	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
    833
    834	mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
    835		  (unsigned long long) dev->catas_err.addr, dev->catas_err.size);
    836
    837	MTHCA_GET(tmp, outbox, QUERY_FW_CMD_DB_EN_OFFSET);
    838	if (tmp & 0x1) {
    839		mthca_dbg(dev, "FW supports commands through doorbells\n");
    840
    841		MTHCA_GET(base, outbox, QUERY_FW_CMD_DB_BASE);
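		/* Doorbell offsets are 16-bit fields, hence the 2-byte stride. */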
    842		for (i = 0; i < MTHCA_CMD_NUM_DBELL_DWORDS; ++i)
    843			MTHCA_GET(dev->cmd.dbell_offsets[i], outbox,
    844				  QUERY_FW_CMD_DB_OFFSET + (i << 1));
    845
    846		mthca_setup_cmd_doorbells(dev, base);
    847	}
    848
    849	if (mthca_is_memfree(dev)) {
    850		MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
    851		MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
    852		MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
    853		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
    854		mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);
    855
    856		/*
    857		 * Round up number of system pages needed in case
    858		 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
    859		 */
    860		dev->fw.arbel.fw_pages =
    861			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
    862				(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
    863
    864		mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
    865			  (unsigned long long) dev->fw.arbel.clr_int_base,
    866			  (unsigned long long) dev->fw.arbel.eq_arm_base,
    867			  (unsigned long long) dev->fw.arbel.eq_set_ci_base);
    868	} else {
    869		MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
    870		MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);
    871
    872		mthca_dbg(dev, "FW size %d KB (start %llx, end %llx)\n",
    873			  (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
    874			  (unsigned long long) dev->fw.tavor.fw_start,
    875			  (unsigned long long) dev->fw.tavor.fw_end);
    876	}
    877
    878out:
    879	mthca_free_mailbox(dev, mailbox);
    880	return err;
    881}
    882
    883int mthca_ENABLE_LAM(struct mthca_dev *dev)
    884{
    885	struct mthca_mailbox *mailbox;
    886	u8 info;
    887	u32 *outbox;
    888	int err = 0;
    889
    890#define ENABLE_LAM_OUT_SIZE         0x100
    891#define ENABLE_LAM_START_OFFSET     0x00
    892#define ENABLE_LAM_END_OFFSET       0x08
    893#define ENABLE_LAM_INFO_OFFSET      0x13
    894
    895#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
    896#define ENABLE_LAM_INFO_ECC_MASK    0x3
    897
    898	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
    899	if (IS_ERR(mailbox))
    900		return PTR_ERR(mailbox);
    901	outbox = mailbox->buf;
    902
    903	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
    904			    CMD_TIME_CLASS_C);
    905
    906	if (err)
    907		goto out;
    908
    909	MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
    910	MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
    911	MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);
    912
    913	if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
    914	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
    915		mthca_info(dev, "FW reports that HCA-attached memory "
    916			   "is %s hidden; does not match PCI config\n",
    917			   (info & ENABLE_LAM_INFO_HIDDEN_FLAG) ?
    918			   "" : "not");
    919	}
    920	if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
    921		mthca_dbg(dev, "HCA-attached memory is hidden.\n");
    922
    923	mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n",
    924		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
    925		  (unsigned long long) dev->ddr_start,
    926		  (unsigned long long) dev->ddr_end);
    927
    928out:
    929	mthca_free_mailbox(dev, mailbox);
    930	return err;
    931}
    932
    933int mthca_DISABLE_LAM(struct mthca_dev *dev)
    934{
    935	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
    936}
    937
    938int mthca_QUERY_DDR(struct mthca_dev *dev)
    939{
    940	struct mthca_mailbox *mailbox;
    941	u8 info;
    942	u32 *outbox;
    943	int err = 0;
    944
    945#define QUERY_DDR_OUT_SIZE         0x100
    946#define QUERY_DDR_START_OFFSET     0x00
    947#define QUERY_DDR_END_OFFSET       0x08
    948#define QUERY_DDR_INFO_OFFSET      0x13
    949
    950#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
    951#define QUERY_DDR_INFO_ECC_MASK    0x3
    952
    953	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
    954	if (IS_ERR(mailbox))
    955		return PTR_ERR(mailbox);
    956	outbox = mailbox->buf;
    957
    958	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
    959			    CMD_TIME_CLASS_A);
    960
    961	if (err)
    962		goto out;
    963
    964	MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
    965	MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
    966	MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);
    967
    968	if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
    969	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
    970		mthca_info(dev, "FW reports that HCA-attached memory "
    971			   "is %s hidden; does not match PCI config\n",
    972			   (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
    973			   "" : "not");
    974	}
    975	if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
    976		mthca_dbg(dev, "HCA-attached memory is hidden.\n");
    977
    978	mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n",
    979		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
    980		  (unsigned long long) dev->ddr_start,
    981		  (unsigned long long) dev->ddr_end);
    982
    983out:
    984	mthca_free_mailbox(dev, mailbox);
    985	return err;
    986}
    987
    988int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
    989			struct mthca_dev_lim *dev_lim)
    990{
    991	struct mthca_mailbox *mailbox;
    992	u32 *outbox;
    993	u8 field;
    994	u16 size;
    995	u16 stat_rate;
    996	int err;
    997
    998#define QUERY_DEV_LIM_OUT_SIZE             0x100
    999#define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET     0x10
   1000#define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET      0x11
   1001#define QUERY_DEV_LIM_RSVD_QP_OFFSET        0x12
   1002#define QUERY_DEV_LIM_MAX_QP_OFFSET         0x13
   1003#define QUERY_DEV_LIM_RSVD_SRQ_OFFSET       0x14
   1004#define QUERY_DEV_LIM_MAX_SRQ_OFFSET        0x15
   1005#define QUERY_DEV_LIM_RSVD_EEC_OFFSET       0x16
   1006#define QUERY_DEV_LIM_MAX_EEC_OFFSET        0x17
   1007#define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET      0x19
   1008#define QUERY_DEV_LIM_RSVD_CQ_OFFSET        0x1a
   1009#define QUERY_DEV_LIM_MAX_CQ_OFFSET         0x1b
   1010#define QUERY_DEV_LIM_MAX_MPT_OFFSET        0x1d
   1011#define QUERY_DEV_LIM_RSVD_EQ_OFFSET        0x1e
   1012#define QUERY_DEV_LIM_MAX_EQ_OFFSET         0x1f
   1013#define QUERY_DEV_LIM_RSVD_MTT_OFFSET       0x20
   1014#define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET     0x21
   1015#define QUERY_DEV_LIM_RSVD_MRW_OFFSET       0x22
   1016#define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET    0x23
   1017#define QUERY_DEV_LIM_MAX_AV_OFFSET         0x27
   1018#define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET     0x29
   1019#define QUERY_DEV_LIM_MAX_RES_QP_OFFSET     0x2b
   1020#define QUERY_DEV_LIM_MAX_RDMA_OFFSET       0x2f
   1021#define QUERY_DEV_LIM_RSZ_SRQ_OFFSET        0x33
   1022#define QUERY_DEV_LIM_ACK_DELAY_OFFSET      0x35
   1023#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
   1024#define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
   1025#define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
   1026#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET   0x3c
   1027#define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
   1028#define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
   1029#define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
   1030#define QUERY_DEV_LIM_UAR_SZ_OFFSET         0x49
   1031#define QUERY_DEV_LIM_PAGE_SZ_OFFSET        0x4b
   1032#define QUERY_DEV_LIM_MAX_SG_OFFSET         0x51
   1033#define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET    0x52
   1034#define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET      0x55
   1035#define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
   1036#define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET     0x61
   1037#define QUERY_DEV_LIM_RSVD_MCG_OFFSET       0x62
   1038#define QUERY_DEV_LIM_MAX_MCG_OFFSET        0x63
   1039#define QUERY_DEV_LIM_RSVD_PD_OFFSET        0x64
   1040#define QUERY_DEV_LIM_MAX_PD_OFFSET         0x65
   1041#define QUERY_DEV_LIM_RSVD_RDD_OFFSET       0x66
   1042#define QUERY_DEV_LIM_MAX_RDD_OFFSET        0x67
   1043#define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET   0x80
   1044#define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET   0x82
   1045#define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET  0x84
   1046#define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET  0x86
   1047#define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET   0x88
   1048#define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET   0x8a
   1049#define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET   0x8c
   1050#define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET   0x8e
   1051#define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET   0x90
   1052#define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET   0x92
   1053#define QUERY_DEV_LIM_PBL_SZ_OFFSET         0x96
   1054#define QUERY_DEV_LIM_BMME_FLAGS_OFFSET     0x97
   1055#define QUERY_DEV_LIM_RSVD_LKEY_OFFSET      0x98
   1056#define QUERY_DEV_LIM_LAMR_OFFSET           0x9f
   1057#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET     0xa0
   1058
   1059	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1060	if (IS_ERR(mailbox))
   1061		return PTR_ERR(mailbox);
   1062	outbox = mailbox->buf;
   1063
   1064	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
   1065			    CMD_TIME_CLASS_A);
   1066
   1067	if (err)
   1068		goto out;
   1069
   1070	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
   1071	dev_lim->reserved_qps = 1 << (field & 0xf);
   1072	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
   1073	dev_lim->max_qps = 1 << (field & 0x1f);
   1074	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
   1075	dev_lim->reserved_srqs = 1 << (field >> 4);
   1076	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
   1077	dev_lim->max_srqs = 1 << (field & 0x1f);
   1078	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
   1079	dev_lim->reserved_eecs = 1 << (field & 0xf);
   1080	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
   1081	dev_lim->max_eecs = 1 << (field & 0x1f);
   1082	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
   1083	dev_lim->max_cq_sz = 1 << field;
   1084	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
   1085	dev_lim->reserved_cqs = 1 << (field & 0xf);
   1086	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
   1087	dev_lim->max_cqs = 1 << (field & 0x1f);
   1088	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
   1089	dev_lim->max_mpts = 1 << (field & 0x3f);
   1090	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
   1091	dev_lim->reserved_eqs = 1 << (field & 0xf);
   1092	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
   1093	dev_lim->max_eqs = 1 << (field & 0x7);
   1094	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
   1095	if (mthca_is_memfree(dev))
   1096		dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
   1097					       dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
   1098	else
   1099		dev_lim->reserved_mtts = 1 << (field >> 4);
   1100	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
   1101	dev_lim->max_mrw_sz = 1 << field;
   1102	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
   1103	dev_lim->reserved_mrws = 1 << (field & 0xf);
   1104	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
   1105	dev_lim->max_mtt_seg = 1 << (field & 0x3f);
   1106	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
   1107	dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
   1108	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
   1109	dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
   1110	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
   1111	dev_lim->max_rdma_global = 1 << (field & 0x3f);
   1112	MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
   1113	dev_lim->local_ca_ack_delay = field & 0x1f;
   1114	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
   1115	dev_lim->max_mtu        = field >> 4;
   1116	dev_lim->max_port_width = field & 0xf;
   1117	MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
   1118	dev_lim->max_vl    = field >> 4;
   1119	dev_lim->num_ports = field & 0xf;
   1120	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
   1121	dev_lim->max_gids = 1 << (field & 0xf);
   1122	MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
   1123	dev_lim->stat_rate_support = stat_rate;
   1124	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
   1125	dev_lim->max_pkeys = 1 << (field & 0xf);
   1126	MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
   1127	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
   1128	dev_lim->reserved_uars = field >> 4;
   1129	MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
   1130	dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
   1131	MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
   1132	dev_lim->min_page_sz = 1 << field;
   1133	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
   1134	dev_lim->max_sg = field;
   1135
   1136	MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
   1137	dev_lim->max_desc_sz = size;
   1138
   1139	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
   1140	dev_lim->max_qp_per_mcg = 1 << field;
   1141	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
   1142	dev_lim->reserved_mgms = field & 0xf;
   1143	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
   1144	dev_lim->max_mcgs = 1 << field;
   1145	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
   1146	dev_lim->reserved_pds = field >> 4;
   1147	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
   1148	dev_lim->max_pds = 1 << (field & 0x3f);
   1149	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
   1150	dev_lim->reserved_rdds = field >> 4;
   1151	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
   1152	dev_lim->max_rdds = 1 << (field & 0x3f);
   1153
   1154	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
   1155	dev_lim->eec_entry_sz = size;
   1156	MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
   1157	dev_lim->qpc_entry_sz = size;
   1158	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
   1159	dev_lim->eeec_entry_sz = size;
   1160	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
   1161	dev_lim->eqpc_entry_sz = size;
   1162	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
   1163	dev_lim->eqc_entry_sz = size;
   1164	MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
   1165	dev_lim->cqc_entry_sz = size;
   1166	MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
   1167	dev_lim->srq_entry_sz = size;
   1168	MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
   1169	dev_lim->uar_scratch_entry_sz = size;
   1170
   1171	if (mthca_is_memfree(dev)) {
   1172		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
   1173		dev_lim->max_srq_sz = 1 << field;
   1174		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
   1175		dev_lim->max_qp_sz = 1 << field;
   1176		MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
   1177		dev_lim->hca.arbel.resize_srq = field & 1;
   1178		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
   1179		dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
   1180		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
   1181		dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
   1182		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
   1183		dev_lim->mpt_entry_sz = size;
   1184		MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
   1185		dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
   1186		MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
   1187			  QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
   1188		MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
   1189			  QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
   1190		MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
   1191		dev_lim->hca.arbel.lam_required = field & 1;
   1192		MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
   1193			  QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
   1194
   1195		if (dev_lim->hca.arbel.bmme_flags & 1)
   1196			mthca_dbg(dev, "Base MM extensions: yes "
   1197				  "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
   1198				  dev_lim->hca.arbel.bmme_flags,
   1199				  dev_lim->hca.arbel.max_pbl_sz,
   1200				  dev_lim->hca.arbel.reserved_lkey);
   1201		else
   1202			mthca_dbg(dev, "Base MM extensions: no\n");
   1203
   1204		mthca_dbg(dev, "Max ICM size %lld MB\n",
   1205			  (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20);
   1206	} else {
   1207		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
   1208		dev_lim->max_srq_sz = (1 << field) - 1;
   1209		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
   1210		dev_lim->max_qp_sz = (1 << field) - 1;
   1211		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
   1212		dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
   1213		dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
   1214	}
   1215
   1216	mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
   1217		  dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
   1218	mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
   1219		  dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
   1220	mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
   1221		  dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
   1222	mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
   1223		  dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz);
   1224	mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
   1225		  dev_lim->reserved_mrws, dev_lim->reserved_mtts);
   1226	mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
   1227		  dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
   1228	mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
    1229		  dev_lim->max_qp_per_mcg, dev_lim->reserved_mgms);
   1230	mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
   1231		  dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
   1232
   1233	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
   1234
   1235out:
   1236	mthca_free_mailbox(dev, mailbox);
   1237	return err;
   1238}
   1239
   1240static void get_board_id(void *vsd, char *board_id)
   1241{
   1242	int i;
   1243
   1244#define VSD_OFFSET_SIG1		0x00
   1245#define VSD_OFFSET_SIG2		0xde
   1246#define VSD_OFFSET_MLX_BOARD_ID	0xd0
   1247#define VSD_OFFSET_TS_BOARD_ID	0x20
   1248
   1249#define VSD_SIGNATURE_TOPSPIN	0x5ad
   1250
   1251	memset(board_id, 0, MTHCA_BOARD_ID_LEN);
   1252
   1253	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
   1254	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
   1255		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
   1256	} else {
   1257		/*
   1258		 * The board ID is a string but the firmware byte
   1259		 * swaps each 4-byte word before passing it back to
   1260		 * us.  Therefore we need to swab it before printing.
   1261		 */
   1262		for (i = 0; i < 4; ++i)
   1263			((u32 *) board_id)[i] =
   1264				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
   1265	}
   1266}
   1267
   1268int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
   1269			struct mthca_adapter *adapter)
   1270{
   1271	struct mthca_mailbox *mailbox;
   1272	u32 *outbox;
   1273	int err;
   1274
   1275#define QUERY_ADAPTER_OUT_SIZE             0x100
   1276#define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
   1277#define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
   1278#define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
   1279#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
   1280#define QUERY_ADAPTER_VSD_OFFSET           0x20
   1281
   1282	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1283	if (IS_ERR(mailbox))
   1284		return PTR_ERR(mailbox);
   1285	outbox = mailbox->buf;
   1286
   1287	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
   1288			    CMD_TIME_CLASS_A);
   1289
   1290	if (err)
   1291		goto out;
   1292
   1293	if (!mthca_is_memfree(dev)) {
   1294		MTHCA_GET(adapter->vendor_id, outbox,
   1295			  QUERY_ADAPTER_VENDOR_ID_OFFSET);
   1296		MTHCA_GET(adapter->device_id, outbox,
   1297			  QUERY_ADAPTER_DEVICE_ID_OFFSET);
   1298		MTHCA_GET(adapter->revision_id, outbox,
   1299			  QUERY_ADAPTER_REVISION_ID_OFFSET);
   1300	}
   1301	MTHCA_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
   1302
   1303	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
   1304		     adapter->board_id);
   1305
   1306out:
   1307	mthca_free_mailbox(dev, mailbox);
   1308	return err;
   1309}
   1310
   1311int mthca_INIT_HCA(struct mthca_dev *dev,
   1312		   struct mthca_init_hca_param *param)
   1313{
   1314	struct mthca_mailbox *mailbox;
   1315	__be32 *inbox;
   1316	int err;
   1317
   1318#define INIT_HCA_IN_SIZE             	 0x200
   1319#define INIT_HCA_FLAGS1_OFFSET           0x00c
   1320#define INIT_HCA_FLAGS2_OFFSET           0x014
   1321#define INIT_HCA_QPC_OFFSET          	 0x020
   1322#define  INIT_HCA_QPC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x10)
   1323#define  INIT_HCA_LOG_QP_OFFSET      	 (INIT_HCA_QPC_OFFSET + 0x17)
   1324#define  INIT_HCA_EEC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x20)
   1325#define  INIT_HCA_LOG_EEC_OFFSET     	 (INIT_HCA_QPC_OFFSET + 0x27)
   1326#define  INIT_HCA_SRQC_BASE_OFFSET   	 (INIT_HCA_QPC_OFFSET + 0x28)
   1327#define  INIT_HCA_LOG_SRQ_OFFSET     	 (INIT_HCA_QPC_OFFSET + 0x2f)
   1328#define  INIT_HCA_CQC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x30)
   1329#define  INIT_HCA_LOG_CQ_OFFSET      	 (INIT_HCA_QPC_OFFSET + 0x37)
   1330#define  INIT_HCA_EQPC_BASE_OFFSET   	 (INIT_HCA_QPC_OFFSET + 0x40)
   1331#define  INIT_HCA_EEEC_BASE_OFFSET   	 (INIT_HCA_QPC_OFFSET + 0x50)
   1332#define  INIT_HCA_EQC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x60)
   1333#define  INIT_HCA_LOG_EQ_OFFSET      	 (INIT_HCA_QPC_OFFSET + 0x67)
   1334#define  INIT_HCA_RDB_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x70)
   1335#define INIT_HCA_UDAV_OFFSET         	 0x0b0
   1336#define  INIT_HCA_UDAV_LKEY_OFFSET   	 (INIT_HCA_UDAV_OFFSET + 0x0)
   1337#define  INIT_HCA_UDAV_PD_OFFSET     	 (INIT_HCA_UDAV_OFFSET + 0x4)
   1338#define INIT_HCA_MCAST_OFFSET        	 0x0c0
   1339#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
   1340#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
   1341#define  INIT_HCA_MC_HASH_SZ_OFFSET      (INIT_HCA_MCAST_OFFSET + 0x16)
   1342#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
   1343#define INIT_HCA_TPT_OFFSET              0x0f0
   1344#define  INIT_HCA_MPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
   1345#define  INIT_HCA_MTT_SEG_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x09)
   1346#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
   1347#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
   1348#define INIT_HCA_UAR_OFFSET              0x120
   1349#define  INIT_HCA_UAR_BASE_OFFSET        (INIT_HCA_UAR_OFFSET + 0x00)
   1350#define  INIT_HCA_UARC_SZ_OFFSET         (INIT_HCA_UAR_OFFSET + 0x09)
   1351#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
   1352#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
   1353#define  INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
   1354#define  INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)
   1355
   1356	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1357	if (IS_ERR(mailbox))
   1358		return PTR_ERR(mailbox);
   1359	inbox = mailbox->buf;
   1360
   1361	memset(inbox, 0, INIT_HCA_IN_SIZE);
   1362
   1363	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
   1364		MTHCA_PUT(inbox, 0x1, INIT_HCA_FLAGS1_OFFSET);
   1365
   1366#if defined(__LITTLE_ENDIAN)
   1367	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
   1368#elif defined(__BIG_ENDIAN)
   1369	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1 << 1);
   1370#else
   1371#error Host endianness not defined
   1372#endif
   1373	/* Check port for UD address vector: */
   1374	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);
   1375
   1376	/* Enable IPoIB checksumming if we can: */
   1377	if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM)
   1378		*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3);
   1379
   1380	/* We leave wqe_quota, responder_exu, etc as 0 (default) */
   1381
   1382	/* QPC/EEC/CQC/EQC/RDB attributes */
   1383
   1384	MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
   1385	MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
   1386	MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
   1387	MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
   1388	MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
   1389	MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
   1390	MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
   1391	MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
   1392	MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
   1393	MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
   1394	MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
   1395	MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
   1396	MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);
   1397
   1398	/* UD AV attributes */
   1399
   1400	/* multicast attributes */
   1401
   1402	MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
   1403	MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
   1404	MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
   1405	MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
   1406
   1407	/* TPT attributes */
   1408
   1409	MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
   1410	if (!mthca_is_memfree(dev))
   1411		MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
   1412	MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
   1413	MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
   1414
   1415	/* UAR attributes */
   1416	{
   1417		u8 uar_page_sz = PAGE_SHIFT - 12;
   1418		MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
   1419	}
   1420
   1421	MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);
   1422
   1423	if (mthca_is_memfree(dev)) {
   1424		MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
   1425		MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
   1426		MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
   1427	}
   1428
   1429	err = mthca_cmd(dev, mailbox->dma, 0, 0,
   1430			CMD_INIT_HCA, CMD_TIME_CLASS_D);
   1431
   1432	mthca_free_mailbox(dev, mailbox);
   1433	return err;
   1434}
   1435
   1436int mthca_INIT_IB(struct mthca_dev *dev,
   1437		  struct mthca_init_ib_param *param,
   1438		  int port)
   1439{
   1440	struct mthca_mailbox *mailbox;
   1441	u32 *inbox;
   1442	int err;
   1443	u32 flags;
   1444
   1445#define INIT_IB_IN_SIZE          56
   1446#define INIT_IB_FLAGS_OFFSET     0x00
   1447#define INIT_IB_FLAG_SIG         (1 << 18)
   1448#define INIT_IB_FLAG_NG          (1 << 17)
   1449#define INIT_IB_FLAG_G0          (1 << 16)
   1450#define INIT_IB_VL_SHIFT         4
   1451#define INIT_IB_PORT_WIDTH_SHIFT 8
   1452#define INIT_IB_MTU_SHIFT        12
   1453#define INIT_IB_MAX_GID_OFFSET   0x06
   1454#define INIT_IB_MAX_PKEY_OFFSET  0x0a
   1455#define INIT_IB_GUID0_OFFSET     0x10
   1456#define INIT_IB_NODE_GUID_OFFSET 0x18
   1457#define INIT_IB_SI_GUID_OFFSET   0x20
   1458
   1459	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1460	if (IS_ERR(mailbox))
   1461		return PTR_ERR(mailbox);
   1462	inbox = mailbox->buf;
   1463
   1464	memset(inbox, 0, INIT_IB_IN_SIZE);
   1465
   1466	flags = 0;
   1467	flags |= param->set_guid0     ? INIT_IB_FLAG_G0  : 0;
   1468	flags |= param->set_node_guid ? INIT_IB_FLAG_NG  : 0;
   1469	flags |= param->set_si_guid   ? INIT_IB_FLAG_SIG : 0;
   1470	flags |= param->vl_cap << INIT_IB_VL_SHIFT;
   1471	flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
   1472	flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
   1473	MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
   1474
   1475	MTHCA_PUT(inbox, param->gid_cap,   INIT_IB_MAX_GID_OFFSET);
   1476	MTHCA_PUT(inbox, param->pkey_cap,  INIT_IB_MAX_PKEY_OFFSET);
   1477	MTHCA_PUT(inbox, param->guid0,     INIT_IB_GUID0_OFFSET);
   1478	MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
   1479	MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);
   1480
   1481	err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
   1482			CMD_TIME_CLASS_A);
   1483
   1484	mthca_free_mailbox(dev, mailbox);
   1485	return err;
   1486}
   1487
   1488int mthca_CLOSE_IB(struct mthca_dev *dev, int port)
   1489{
   1490	return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A);
   1491}
   1492
   1493int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic)
   1494{
   1495	return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C);
   1496}
   1497
   1498int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
   1499		 int port)
   1500{
   1501	struct mthca_mailbox *mailbox;
   1502	u32 *inbox;
   1503	int err;
   1504	u32 flags = 0;
   1505
   1506#define SET_IB_IN_SIZE         0x40
   1507#define SET_IB_FLAGS_OFFSET    0x00
   1508#define SET_IB_FLAG_SIG        (1 << 18)
   1509#define SET_IB_FLAG_RQK        (1 <<  0)
   1510#define SET_IB_CAP_MASK_OFFSET 0x04
   1511#define SET_IB_SI_GUID_OFFSET  0x08
   1512
   1513	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1514	if (IS_ERR(mailbox))
   1515		return PTR_ERR(mailbox);
   1516	inbox = mailbox->buf;
   1517
   1518	memset(inbox, 0, SET_IB_IN_SIZE);
   1519
   1520	flags |= param->set_si_guid     ? SET_IB_FLAG_SIG : 0;
   1521	flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
   1522	MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
   1523
   1524	MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
   1525	MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
   1526
   1527	err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
   1528			CMD_TIME_CLASS_B);
   1529
   1530	mthca_free_mailbox(dev, mailbox);
   1531	return err;
   1532}
   1533
   1534int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt)
   1535{
   1536	return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt);
   1537}
   1538
   1539int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt)
   1540{
   1541	struct mthca_mailbox *mailbox;
   1542	__be64 *inbox;
   1543	int err;
   1544
   1545	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1546	if (IS_ERR(mailbox))
   1547		return PTR_ERR(mailbox);
   1548	inbox = mailbox->buf;
   1549
   1550	inbox[0] = cpu_to_be64(virt);
   1551	inbox[1] = cpu_to_be64(dma_addr);
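	/*
	 * A single (virtual, physical) address pair; the input modifier of 1
	 * in the command below gives the number of entries in the mailbox.
	 */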
   1552
   1553	err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
   1554			CMD_TIME_CLASS_B);
   1555
   1556	mthca_free_mailbox(dev, mailbox);
   1557
   1558	if (!err)
   1559		mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
   1560			  (unsigned long long) dma_addr, (unsigned long long) virt);
   1561
   1562	return err;
   1563}
   1564
   1565int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count)
   1566{
   1567	mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
   1568		  page_count, (unsigned long long) virt);
   1569
   1570	return mthca_cmd(dev, virt, page_count, 0,
   1571			CMD_UNMAP_ICM, CMD_TIME_CLASS_B);
   1572}
   1573
   1574int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm)
   1575{
   1576	return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1);
   1577}
   1578
   1579int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev)
   1580{
   1581	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B);
   1582}
   1583
   1584int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages)
   1585{
   1586	int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0,
   1587			0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A);
   1588
   1589	if (ret)
   1590		return ret;
   1591
   1592	/*
   1593	 * Round up number of system pages needed in case
   1594	 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
   1595	 */
   1596	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
   1597		(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
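	/*
	 * For example, with 64KB system pages (PAGE_SHIFT == 16) and 4KB ICM
	 * pages, a firmware request for 5 ICM pages is rounded up to 16 and
	 * shifted down by 4, i.e. one system page of auxiliary ICM.
	 */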
   1598
   1599	return 0;
   1600}
   1601
   1602int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1603		    int mpt_index)
   1604{
   1605	return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
   1606			 CMD_TIME_CLASS_B);
   1607}
   1608
   1609int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1610		    int mpt_index)
   1611{
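	/*
	 * A NULL mailbox means the caller does not want the MPT entry back:
	 * no outbox is passed and the opcode modifier is set to 1, which
	 * presumably tells the firmware to skip writing the entry out.
	 */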
   1612	return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
   1613			     !mailbox, CMD_HW2SW_MPT,
   1614			     CMD_TIME_CLASS_B);
   1615}
   1616
   1617int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1618		    int num_mtt)
   1619{
   1620	return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
   1621			 CMD_TIME_CLASS_B);
   1622}
   1623
   1624int mthca_SYNC_TPT(struct mthca_dev *dev)
   1625{
   1626	return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B);
   1627}
   1628
   1629int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
   1630		 int eq_num)
   1631{
   1632	mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
   1633		  unmap ? "Clearing" : "Setting",
   1634		  (unsigned long long) event_mask, eq_num);
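	/* Bit 31 of the input modifier selects unmap (1) vs. map (0). */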
   1635	return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
   1636			 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
   1637}
   1638
   1639int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1640		   int eq_num)
   1641{
   1642	return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
   1643			 CMD_TIME_CLASS_A);
   1644}
   1645
   1646int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1647		   int eq_num)
   1648{
   1649	return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
   1650			     CMD_HW2SW_EQ,
   1651			     CMD_TIME_CLASS_A);
   1652}
   1653
   1654int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1655		   int cq_num)
   1656{
   1657	return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
   1658			CMD_TIME_CLASS_A);
   1659}
   1660
   1661int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1662		   int cq_num)
   1663{
   1664	return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
   1665			     CMD_HW2SW_CQ,
   1666			     CMD_TIME_CLASS_A);
   1667}
   1668
   1669int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size)
   1670{
   1671	struct mthca_mailbox *mailbox;
   1672	__be32 *inbox;
   1673	int err;
   1674
   1675#define RESIZE_CQ_IN_SIZE		0x40
   1676#define RESIZE_CQ_LOG_SIZE_OFFSET	0x0c
   1677#define RESIZE_CQ_LKEY_OFFSET		0x1c
   1678
   1679	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1680	if (IS_ERR(mailbox))
   1681		return PTR_ERR(mailbox);
   1682	inbox = mailbox->buf;
   1683
   1684	memset(inbox, 0, RESIZE_CQ_IN_SIZE);
   1685	/*
   1686	 * Leave start address fields zeroed out -- mthca assumes that
   1687	 * MRs for CQs always start at virtual address 0.
   1688	 */
   1689	MTHCA_PUT(inbox, log_size, RESIZE_CQ_LOG_SIZE_OFFSET);
   1690	MTHCA_PUT(inbox, lkey,     RESIZE_CQ_LKEY_OFFSET);
   1691
   1692	err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
   1693			CMD_TIME_CLASS_B);
   1694
   1695	mthca_free_mailbox(dev, mailbox);
   1696	return err;
   1697}
   1698
   1699int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1700		    int srq_num)
   1701{
   1702	return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
   1703			CMD_TIME_CLASS_A);
   1704}
   1705
   1706int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1707		    int srq_num)
   1708{
   1709	return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
   1710			     CMD_HW2SW_SRQ,
   1711			     CMD_TIME_CLASS_A);
   1712}
   1713
   1714int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
   1715		    struct mthca_mailbox *mailbox)
   1716{
   1717	return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
   1718			     CMD_QUERY_SRQ, CMD_TIME_CLASS_A);
   1719}
   1720
   1721int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit)
   1722{
   1723	return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
   1724			 CMD_TIME_CLASS_B);
   1725}
   1726
   1727int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
   1728		    enum ib_qp_state next, u32 num, int is_ee,
   1729		    struct mthca_mailbox *mailbox, u32 optmask)
   1730{
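	/*
	 * Firmware opcode for each supported (current, next) QP/EE state
	 * transition; pairs not listed are left zero.
	 */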
   1731	static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
   1732		[IB_QPS_RESET] = {
   1733			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1734			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1735			[IB_QPS_INIT]	= CMD_RST2INIT_QPEE,
   1736		},
   1737		[IB_QPS_INIT]  = {
   1738			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1739			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1740			[IB_QPS_INIT]	= CMD_INIT2INIT_QPEE,
   1741			[IB_QPS_RTR]	= CMD_INIT2RTR_QPEE,
   1742		},
   1743		[IB_QPS_RTR]   = {
   1744			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1745			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1746			[IB_QPS_RTS]	= CMD_RTR2RTS_QPEE,
   1747		},
   1748		[IB_QPS_RTS]   = {
   1749			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1750			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1751			[IB_QPS_RTS]	= CMD_RTS2RTS_QPEE,
   1752			[IB_QPS_SQD]	= CMD_RTS2SQD_QPEE,
   1753		},
   1754		[IB_QPS_SQD] = {
   1755			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1756			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1757			[IB_QPS_RTS]	= CMD_SQD2RTS_QPEE,
   1758			[IB_QPS_SQD]	= CMD_SQD2SQD_QPEE,
   1759		},
   1760		[IB_QPS_SQE] = {
   1761			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1762			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1763			[IB_QPS_RTS]	= CMD_SQERR2RTS_QPEE,
   1764		},
   1765		[IB_QPS_ERR] = {
   1766			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
   1767			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
   1768		}
   1769	};
   1770
   1771	u8 op_mod = 0;
   1772	int my_mailbox = 0;
   1773	int err;
   1774
   1775	if (op[cur][next] == CMD_ERR2RST_QPEE) {
   1776		op_mod = 3;	/* don't write outbox, any->reset */
   1777
   1778		/* For debugging */
   1779		if (!mailbox) {
   1780			mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1781			if (!IS_ERR(mailbox)) {
   1782				my_mailbox = 1;
   1783				op_mod     = 2;	/* write outbox, any->reset */
   1784			} else
   1785				mailbox = NULL;
   1786		}
   1787
   1788		err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
   1789				    (!!is_ee << 24) | num, op_mod,
   1790				    op[cur][next], CMD_TIME_CLASS_C);
   1791
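		/*
		 * Dump of the returned QP context, compiled out by default;
		 * change the 0 to 1 to enable it.
		 */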
   1792		if (0 && mailbox) {
   1793			int i;
   1794			mthca_dbg(dev, "Dumping QP context:\n");
   1795			printk(" %08x\n", be32_to_cpup(mailbox->buf));
   1796			for (i = 0; i < 0x100 / 4; ++i) {
   1797				if (i % 8 == 0)
   1798					printk("[%02x] ", i * 4);
   1799				printk(" %08x",
   1800				       be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
   1801				if ((i + 1) % 8 == 0)
   1802					printk("\n");
   1803			}
   1804		}
   1805
   1806		if (my_mailbox)
   1807			mthca_free_mailbox(dev, mailbox);
   1808	} else {
   1809		if (0) {
   1810			int i;
   1811			mthca_dbg(dev, "Dumping QP context:\n");
   1812			printk("  opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
   1813			for (i = 0; i < 0x100 / 4; ++i) {
   1814				if (i % 8 == 0)
   1815					printk("  [%02x] ", i * 4);
   1816				printk(" %08x",
   1817				       be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
   1818				if ((i + 1) % 8 == 0)
   1819					printk("\n");
   1820			}
   1821		}
   1822
   1823		err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
   1824				op_mod, op[cur][next], CMD_TIME_CLASS_C);
   1825	}
   1826
   1827	return err;
   1828}
   1829
   1830int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
   1831		   struct mthca_mailbox *mailbox)
   1832{
   1833	return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
   1834			     CMD_QUERY_QPEE, CMD_TIME_CLASS_A);
   1835}
   1836
   1837int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
   1838{
   1839	u8 op_mod;
   1840
   1841	switch (type) {
   1842	case IB_QPT_SMI:
   1843		op_mod = 0;
   1844		break;
   1845	case IB_QPT_GSI:
   1846		op_mod = 1;
   1847		break;
   1848	case IB_QPT_RAW_IPV6:
   1849		op_mod = 2;
   1850		break;
   1851	case IB_QPT_RAW_ETHERTYPE:
   1852		op_mod = 3;
   1853		break;
   1854	default:
   1855		return -EINVAL;
   1856	}
   1857
   1858	return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
   1859			 CMD_TIME_CLASS_B);
   1860}
   1861
   1862int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
   1863		  int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
   1864		  const void *in_mad, void *response_mad)
   1865{
   1866	struct mthca_mailbox *inmailbox, *outmailbox;
   1867	void *inbox;
   1868	int err;
   1869	u32 in_modifier = port;
   1870	u8 op_modifier = 0;
   1871
   1872#define MAD_IFC_BOX_SIZE      0x400
   1873#define MAD_IFC_MY_QPN_OFFSET 0x100
   1874#define MAD_IFC_RQPN_OFFSET   0x108
   1875#define MAD_IFC_SL_OFFSET     0x10c
   1876#define MAD_IFC_G_PATH_OFFSET 0x10d
   1877#define MAD_IFC_RLID_OFFSET   0x10e
   1878#define MAD_IFC_PKEY_OFFSET   0x112
   1879#define MAD_IFC_GRH_OFFSET    0x140
   1880
   1881	inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1882	if (IS_ERR(inmailbox))
   1883		return PTR_ERR(inmailbox);
   1884	inbox = inmailbox->buf;
   1885
   1886	outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
   1887	if (IS_ERR(outmailbox)) {
   1888		mthca_free_mailbox(dev, inmailbox);
   1889		return PTR_ERR(outmailbox);
   1890	}
   1891
   1892	memcpy(inbox, in_mad, 256);
   1893
   1894	/*
   1895	 * Key check traps can't be generated unless we have in_wc to
   1896	 * tell us where to send the trap.
   1897	 */
   1898	if (ignore_mkey || !in_wc)
   1899		op_modifier |= 0x1;
   1900	if (ignore_bkey || !in_wc)
   1901		op_modifier |= 0x2;
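	/*
	 * op_modifier bit 0x1 skips the M_Key check and 0x2 skips the B_Key
	 * check; 0x4, set below, flags that the extra work-completion info
	 * in the second half of the inbox is valid.
	 */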
   1902
   1903	if (in_wc) {
   1904		u8 val;
   1905
   1906		memset(inbox + 256, 0, 256);
   1907
   1908		MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
   1909		MTHCA_PUT(inbox, in_wc->src_qp,     MAD_IFC_RQPN_OFFSET);
   1910
   1911		val = in_wc->sl << 4;
   1912		MTHCA_PUT(inbox, val,               MAD_IFC_SL_OFFSET);
   1913
   1914		val = in_wc->dlid_path_bits |
   1915			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
   1916		MTHCA_PUT(inbox, val,               MAD_IFC_G_PATH_OFFSET);
   1917
   1918		MTHCA_PUT(inbox, ib_lid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET);
   1919		MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
   1920
   1921		if (in_grh)
   1922			memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
   1923
   1924		op_modifier |= 0x4;
   1925
   1926		in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
   1927	}
   1928
   1929	err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
   1930			    in_modifier, op_modifier,
   1931			    CMD_MAD_IFC, CMD_TIME_CLASS_C);
   1932
   1933	if (!err)
   1934		memcpy(response_mad, outmailbox->buf, 256);
   1935
   1936	mthca_free_mailbox(dev, inmailbox);
   1937	mthca_free_mailbox(dev, outmailbox);
   1938	return err;
   1939}
   1940
   1941int mthca_READ_MGM(struct mthca_dev *dev, int index,
   1942		   struct mthca_mailbox *mailbox)
   1943{
   1944	return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
   1945			     CMD_READ_MGM, CMD_TIME_CLASS_A);
   1946}
   1947
   1948int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
   1949		    struct mthca_mailbox *mailbox)
   1950{
   1951	return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
   1952			 CMD_TIME_CLASS_A);
   1953}
   1954
   1955int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
   1956		    u16 *hash)
   1957{
   1958	u64 imm;
   1959	int err;
   1960
   1961	err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
   1962			    CMD_TIME_CLASS_A);
   1963
   1964	*hash = imm;
   1965	return err;
   1966}
   1967
   1968int mthca_NOP(struct mthca_dev *dev)
   1969{
   1970	return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100));
   1971}