mr.c (24190B)

/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
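
/*
 * The MTT buddy allocator: bits[o] is a bitmap with one bit per block of
 * order o (an order-o block covers 2^o MTT segments), where a set bit
 * marks a free block and num_free[o] counts the set bits.
 * mlx4_buddy_alloc() above splits a larger free block downward when no
 * block of the requested order is available, and mlx4_buddy_free()
 * re-merges a freed block with its buddy for as long as the buddy is
 * also free.  mlx4_buddy_init() seeds the allocator with a single free
 * block of maximal order.
 */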
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
		buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
		if (!buddy->bits[i])
			goto err_out_free;
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
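
/*
 * An MPT key is the hardware MPT index rotated left by one byte;
 * key_to_hw_index() undoes the rotation.  Callers mask the result with
 * (dev->caps.num_mpts - 1) to recover the entry's index in the MPT table.
 */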
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
{
	int err;
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;

	if (mmr->enabled != MLX4_MPT_EN_HW)
		return -EINVAL;

	err = mlx4_HW2SW_MPT(dev, NULL, key);
	if (err) {
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
		return err;
	}

	mmr->enabled = MLX4_MPT_EN_SW;

	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
				&mlx4_priv(dev)->mr_table.dmpt_table,
				key, NULL);
	} else {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
				   MLX4_CMD_WRAPPED);
		if (err)
			goto free_mailbox;

		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	}

	if (!(*mpt_entry) || !(**mpt_entry)) {
		err = -ENOMEM;
		goto free_mailbox;
	}

	return 0;

free_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
{
	int err;

	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		wmb();

		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

		/* Make sure the new status is written */
		wmb();

		err = mlx4_SYNC_TPT(dev);
	} else {
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);

		(*mpt_entry)->lkey = 0;
		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	}

	if (!err) {
		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
		mmr->enabled = MLX4_MPT_EN_HW;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;

	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
					  (pdn & MLX4_MPT_PD_MASK)
					  | MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);
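
/*
 * Like mlx4_mr_hw_change_pd() above, this edits only the software copy of
 * the MPT entry obtained via mlx4_mr_hw_get_mpt(); the new access flags
 * take effect once the entry is pushed back to the device with
 * mlx4_mr_hw_write_mpt().
 */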
int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}
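
/*
 * Reserve an MPT entry and an MTT range for a new MR.  The MR starts out
 * DISABLED and is not known to the hardware until mlx4_mr_enable() is
 * called.  A typical registration sequence (sketch only, error handling
 * omitted) is:
 *
 *	struct mlx4_mr mr;
 *
 *	mlx4_mr_alloc(dev, pdn, iova, size, access, npages, page_shift, &mr);
 *	mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	mlx4_mr_enable(dev, &mr);
 */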
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
	mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
	int err;

	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	if (err)
		return err;

	mpt_entry->start = cpu_to_be64(iova);
	mpt_entry->length = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(page_shift);
	mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
					  MLX4_MPT_FLAG_SW_OWNS));
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	}
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}
	mr->enabled = MLX4_MPT_EN_SW;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
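
/*
 * Hand an MR over to the hardware: back its MPT entry with ICM, build the
 * entry in a command mailbox and transfer ownership with SW2HW_MPT.  An MR
 * without an MTT (mtt.order < 0) is marked PHYSICAL; an MR whose MTT has
 * page_shift == 0 is set up as a fast-register MR in the free state.
 */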
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start = cpu_to_be64(mr->iova);
	mpt_entry->length = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages -= chunk;
		start_index += chunk;
		page_list += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages -= chunk;
			start_index += chunk;
			page_list += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
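
/*
 * Convenience wrapper for mlx4_buf-backed buffers: gather the DMA address
 * of every page (direct or indirect layout) into a temporary page list and
 * write it out through mlx4_write_mtt().
 */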
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -EOPNOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key = hw_index_to_key(index);
	mw->pd = pd;
	mw->type = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;
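
	/*
	 * Carve the MTT entries the device reports as reserved out of the
	 * buddy system by allocating a power-of-two range that covers them,
	 * so they are never handed out by regular MTT allocations.
	 */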
	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);