core.c (21853B)
1// SPDX-License-Identifier: BSD-3-Clause 2/* 3 * Copyright (c) 2020, MIPI Alliance, Inc. 4 * 5 * Author: Nicolas Pitre <npitre@baylibre.com> 6 * 7 * Core driver code with main interface to the I3C subsystem. 8 */ 9 10#include <linux/bitfield.h> 11#include <linux/device.h> 12#include <linux/errno.h> 13#include <linux/i3c/master.h> 14#include <linux/interrupt.h> 15#include <linux/io.h> 16#include <linux/iopoll.h> 17#include <linux/module.h> 18#include <linux/platform_device.h> 19 20#include "hci.h" 21#include "ext_caps.h" 22#include "cmd.h" 23#include "dat.h" 24 25 26/* 27 * Host Controller Capabilities and Operation Registers 28 */ 29 30#define reg_read(r) readl(hci->base_regs + (r)) 31#define reg_write(r, v) writel(v, hci->base_regs + (r)) 32#define reg_set(r, v) reg_write(r, reg_read(r) | (v)) 33#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v)) 34 35#define HCI_VERSION 0x00 /* HCI Version (in BCD) */ 36 37#define HC_CONTROL 0x04 38#define HC_CONTROL_BUS_ENABLE BIT(31) 39#define HC_CONTROL_RESUME BIT(30) 40#define HC_CONTROL_ABORT BIT(29) 41#define HC_CONTROL_HALT_ON_CMD_TIMEOUT BIT(12) 42#define HC_CONTROL_HOT_JOIN_CTRL BIT(8) /* Hot-Join ACK/NACK Control */ 43#define HC_CONTROL_I2C_TARGET_PRESENT BIT(7) 44#define HC_CONTROL_PIO_MODE BIT(6) /* DMA/PIO Mode Selector */ 45#define HC_CONTROL_DATA_BIG_ENDIAN BIT(4) 46#define HC_CONTROL_IBA_INCLUDE BIT(0) /* Include I3C Broadcast Address */ 47 48#define MASTER_DEVICE_ADDR 0x08 /* Master Device Address */ 49#define MASTER_DYNAMIC_ADDR_VALID BIT(31) /* Dynamic Address is Valid */ 50#define MASTER_DYNAMIC_ADDR(v) FIELD_PREP(GENMASK(22, 16), v) 51 52#define HC_CAPABILITIES 0x0c 53#define HC_CAP_SG_DC_EN BIT(30) 54#define HC_CAP_SG_IBI_EN BIT(29) 55#define HC_CAP_SG_CR_EN BIT(28) 56#define HC_CAP_MAX_DATA_LENGTH GENMASK(24, 22) 57#define HC_CAP_CMD_SIZE GENMASK(21, 20) 58#define HC_CAP_DIRECT_COMMANDS_EN BIT(18) 59#define HC_CAP_MULTI_LANE_EN BIT(15) 60#define HC_CAP_CMD_CCC_DEFBYTE BIT(10) 61#define HC_CAP_HDR_BT_EN BIT(8) 62#define HC_CAP_HDR_TS_EN BIT(7) 63#define HC_CAP_HDR_DDR_EN BIT(6) 64#define HC_CAP_NON_CURRENT_MASTER_CAP BIT(5) /* master handoff capable */ 65#define HC_CAP_DATA_BYTE_CFG_EN BIT(4) /* endian selection possible */ 66#define HC_CAP_AUTO_COMMAND BIT(3) 67#define HC_CAP_COMBO_COMMAND BIT(2) 68 69#define RESET_CONTROL 0x10 70#define BUS_RESET BIT(31) 71#define BUS_RESET_TYPE GENMASK(30, 29) 72#define IBI_QUEUE_RST BIT(5) 73#define RX_FIFO_RST BIT(4) 74#define TX_FIFO_RST BIT(3) 75#define RESP_QUEUE_RST BIT(2) 76#define CMD_QUEUE_RST BIT(1) 77#define SOFT_RST BIT(0) /* Core Reset */ 78 79#define PRESENT_STATE 0x14 80#define STATE_CURRENT_MASTER BIT(2) 81 82#define INTR_STATUS 0x20 83#define INTR_STATUS_ENABLE 0x24 84#define INTR_SIGNAL_ENABLE 0x28 85#define INTR_FORCE 0x2c 86#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */ 87#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */ 88#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */ 89#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */ 90#define INTR_HC_RINGS GENMASK(7, 0) 91 92#define DAT_SECTION 0x30 /* Device Address Table */ 93#define DAT_ENTRY_SIZE GENMASK(31, 28) 94#define DAT_TABLE_SIZE GENMASK(18, 12) 95#define DAT_TABLE_OFFSET GENMASK(11, 0) 96 97#define DCT_SECTION 0x34 /* Device Characteristics Table */ 98#define DCT_ENTRY_SIZE GENMASK(31, 28) 99#define DCT_TABLE_INDEX GENMASK(23, 19) 100#define DCT_TABLE_SIZE GENMASK(18, 12) 101#define DCT_TABLE_OFFSET GENMASK(11, 0) 102 103#define RING_HEADERS_SECTION 0x38 104#define RING_HEADERS_OFFSET GENMASK(15, 0) 105 106#define PIO_SECTION 0x3c 107#define PIO_REGS_OFFSET GENMASK(15, 0) /* PIO Offset */ 108 109#define EXT_CAPS_SECTION 0x40 110#define EXT_CAPS_OFFSET GENMASK(15, 0) 111 112#define IBI_NOTIFY_CTRL 0x58 /* IBI Notify Control */ 113#define IBI_NOTIFY_SIR_REJECTED BIT(3) /* Rejected Target Interrupt Request */ 114#define IBI_NOTIFY_MR_REJECTED BIT(1) /* Rejected Master Request Control */ 115#define IBI_NOTIFY_HJ_REJECTED BIT(0) /* Rejected Hot-Join Control */ 116 117#define DEV_CTX_BASE_LO 0x60 118#define DEV_CTX_BASE_HI 0x64 119 120 121static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m) 122{ 123 return container_of(m, struct i3c_hci, master); 124} 125 126static int i3c_hci_bus_init(struct i3c_master_controller *m) 127{ 128 struct i3c_hci *hci = to_i3c_hci(m); 129 struct i3c_device_info info; 130 int ret; 131 132 DBG(""); 133 134 if (hci->cmd == &mipi_i3c_hci_cmd_v1) { 135 ret = mipi_i3c_hci_dat_v1.init(hci); 136 if (ret) 137 return ret; 138 } 139 140 ret = i3c_master_get_free_addr(m, 0); 141 if (ret < 0) 142 return ret; 143 reg_write(MASTER_DEVICE_ADDR, 144 MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID); 145 memset(&info, 0, sizeof(info)); 146 info.dyn_addr = ret; 147 ret = i3c_master_set_info(m, &info); 148 if (ret) 149 return ret; 150 151 ret = hci->io->init(hci); 152 if (ret) 153 return ret; 154 155 reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE); 156 DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL)); 157 158 return 0; 159} 160 161static void i3c_hci_bus_cleanup(struct i3c_master_controller *m) 162{ 163 struct i3c_hci *hci = to_i3c_hci(m); 164 165 DBG(""); 166 167 reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE); 168 hci->io->cleanup(hci); 169 if (hci->cmd == &mipi_i3c_hci_cmd_v1) 170 mipi_i3c_hci_dat_v1.cleanup(hci); 171} 172 173void mipi_i3c_hci_resume(struct i3c_hci *hci) 174{ 175 /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */ 176 reg_write(HC_CONTROL, reg_read(HC_CONTROL)); 177} 178 179/* located here rather than pio.c because needed bits are in core reg space */ 180void mipi_i3c_hci_pio_reset(struct i3c_hci *hci) 181{ 182 reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST); 183} 184 185/* located here rather than dct.c because needed bits are in core reg space */ 186void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci) 187{ 188 reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0)); 189} 190 191static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m, 192 struct i3c_ccc_cmd *ccc) 193{ 194 struct i3c_hci *hci = to_i3c_hci(m); 195 struct hci_xfer *xfer; 196 bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC); 197 bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT); 198 unsigned int nxfers = ccc->ndests + prefixed; 199 DECLARE_COMPLETION_ONSTACK(done); 200 int i, last, ret = 0; 201 202 DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d", 203 ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len); 204 205 xfer = hci_alloc_xfer(nxfers); 206 if (!xfer) 207 return -ENOMEM; 208 209 if (prefixed) { 210 xfer->data = NULL; 211 xfer->data_len = 0; 212 xfer->rnw = false; 213 hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR, 214 ccc->id, true); 215 xfer++; 216 } 217 218 for (i = 0; i < nxfers - prefixed; i++) { 219 xfer[i].data = ccc->dests[i].payload.data; 220 xfer[i].data_len = ccc->dests[i].payload.len; 221 xfer[i].rnw = ccc->rnw; 222 ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr, 223 ccc->id, raw); 224 if (ret) 225 goto out; 226 xfer[i].cmd_desc[0] |= CMD_0_ROC; 227 } 228 last = i - 1; 229 xfer[last].cmd_desc[0] |= CMD_0_TOC; 230 xfer[last].completion = &done; 231 232 if (prefixed) 233 xfer--; 234 235 ret = hci->io->queue_xfer(hci, xfer, nxfers); 236 if (ret) 237 goto out; 238 if (!wait_for_completion_timeout(&done, HZ) && 239 hci->io->dequeue_xfer(hci, xfer, nxfers)) { 240 ret = -ETIME; 241 goto out; 242 } 243 for (i = prefixed; i < nxfers; i++) { 244 if (ccc->rnw) 245 ccc->dests[i - prefixed].payload.len = 246 RESP_DATA_LENGTH(xfer[i].response); 247 if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) { 248 ret = -EIO; 249 goto out; 250 } 251 } 252 253 if (ccc->rnw) 254 DBG("got: %*ph", 255 ccc->dests[0].payload.len, ccc->dests[0].payload.data); 256 257out: 258 hci_free_xfer(xfer, nxfers); 259 return ret; 260} 261 262static int i3c_hci_daa(struct i3c_master_controller *m) 263{ 264 struct i3c_hci *hci = to_i3c_hci(m); 265 266 DBG(""); 267 268 return hci->cmd->perform_daa(hci); 269} 270 271static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev, 272 struct i3c_priv_xfer *i3c_xfers, 273 int nxfers) 274{ 275 struct i3c_master_controller *m = i3c_dev_get_master(dev); 276 struct i3c_hci *hci = to_i3c_hci(m); 277 struct hci_xfer *xfer; 278 DECLARE_COMPLETION_ONSTACK(done); 279 unsigned int size_limit; 280 int i, last, ret = 0; 281 282 DBG("nxfers = %d", nxfers); 283 284 xfer = hci_alloc_xfer(nxfers); 285 if (!xfer) 286 return -ENOMEM; 287 288 size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps)); 289 290 for (i = 0; i < nxfers; i++) { 291 xfer[i].data_len = i3c_xfers[i].len; 292 ret = -EFBIG; 293 if (xfer[i].data_len >= size_limit) 294 goto out; 295 xfer[i].rnw = i3c_xfers[i].rnw; 296 if (i3c_xfers[i].rnw) { 297 xfer[i].data = i3c_xfers[i].data.in; 298 } else { 299 /* silence the const qualifier warning with a cast */ 300 xfer[i].data = (void *) i3c_xfers[i].data.out; 301 } 302 hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]); 303 xfer[i].cmd_desc[0] |= CMD_0_ROC; 304 } 305 last = i - 1; 306 xfer[last].cmd_desc[0] |= CMD_0_TOC; 307 xfer[last].completion = &done; 308 309 ret = hci->io->queue_xfer(hci, xfer, nxfers); 310 if (ret) 311 goto out; 312 if (!wait_for_completion_timeout(&done, HZ) && 313 hci->io->dequeue_xfer(hci, xfer, nxfers)) { 314 ret = -ETIME; 315 goto out; 316 } 317 for (i = 0; i < nxfers; i++) { 318 if (i3c_xfers[i].rnw) 319 i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response); 320 if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) { 321 ret = -EIO; 322 goto out; 323 } 324 } 325 326out: 327 hci_free_xfer(xfer, nxfers); 328 return ret; 329} 330 331static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev, 332 const struct i2c_msg *i2c_xfers, int nxfers) 333{ 334 struct i3c_master_controller *m = i2c_dev_get_master(dev); 335 struct i3c_hci *hci = to_i3c_hci(m); 336 struct hci_xfer *xfer; 337 DECLARE_COMPLETION_ONSTACK(done); 338 int i, last, ret = 0; 339 340 DBG("nxfers = %d", nxfers); 341 342 xfer = hci_alloc_xfer(nxfers); 343 if (!xfer) 344 return -ENOMEM; 345 346 for (i = 0; i < nxfers; i++) { 347 xfer[i].data = i2c_xfers[i].buf; 348 xfer[i].data_len = i2c_xfers[i].len; 349 xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD; 350 hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]); 351 xfer[i].cmd_desc[0] |= CMD_0_ROC; 352 } 353 last = i - 1; 354 xfer[last].cmd_desc[0] |= CMD_0_TOC; 355 xfer[last].completion = &done; 356 357 ret = hci->io->queue_xfer(hci, xfer, nxfers); 358 if (ret) 359 goto out; 360 if (!wait_for_completion_timeout(&done, HZ) && 361 hci->io->dequeue_xfer(hci, xfer, nxfers)) { 362 ret = -ETIME; 363 goto out; 364 } 365 for (i = 0; i < nxfers; i++) { 366 if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) { 367 ret = -EIO; 368 goto out; 369 } 370 } 371 372out: 373 hci_free_xfer(xfer, nxfers); 374 return ret; 375} 376 377static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev) 378{ 379 struct i3c_master_controller *m = i3c_dev_get_master(dev); 380 struct i3c_hci *hci = to_i3c_hci(m); 381 struct i3c_hci_dev_data *dev_data; 382 int ret; 383 384 DBG(""); 385 386 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); 387 if (!dev_data) 388 return -ENOMEM; 389 if (hci->cmd == &mipi_i3c_hci_cmd_v1) { 390 ret = mipi_i3c_hci_dat_v1.alloc_entry(hci); 391 if (ret < 0) { 392 kfree(dev_data); 393 return ret; 394 } 395 mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr); 396 dev_data->dat_idx = ret; 397 } 398 i3c_dev_set_master_data(dev, dev_data); 399 return 0; 400} 401 402static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr) 403{ 404 struct i3c_master_controller *m = i3c_dev_get_master(dev); 405 struct i3c_hci *hci = to_i3c_hci(m); 406 struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev); 407 408 DBG(""); 409 410 if (hci->cmd == &mipi_i3c_hci_cmd_v1) 411 mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx, 412 dev->info.dyn_addr); 413 return 0; 414} 415 416static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev) 417{ 418 struct i3c_master_controller *m = i3c_dev_get_master(dev); 419 struct i3c_hci *hci = to_i3c_hci(m); 420 struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev); 421 422 DBG(""); 423 424 i3c_dev_set_master_data(dev, NULL); 425 if (hci->cmd == &mipi_i3c_hci_cmd_v1) 426 mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx); 427 kfree(dev_data); 428} 429 430static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev) 431{ 432 struct i3c_master_controller *m = i2c_dev_get_master(dev); 433 struct i3c_hci *hci = to_i3c_hci(m); 434 struct i3c_hci_dev_data *dev_data; 435 int ret; 436 437 DBG(""); 438 439 if (hci->cmd != &mipi_i3c_hci_cmd_v1) 440 return 0; 441 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); 442 if (!dev_data) 443 return -ENOMEM; 444 ret = mipi_i3c_hci_dat_v1.alloc_entry(hci); 445 if (ret < 0) { 446 kfree(dev_data); 447 return ret; 448 } 449 mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr); 450 mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0); 451 dev_data->dat_idx = ret; 452 i2c_dev_set_master_data(dev, dev_data); 453 return 0; 454} 455 456static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev) 457{ 458 struct i3c_master_controller *m = i2c_dev_get_master(dev); 459 struct i3c_hci *hci = to_i3c_hci(m); 460 struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev); 461 462 DBG(""); 463 464 if (dev_data) { 465 i2c_dev_set_master_data(dev, NULL); 466 if (hci->cmd == &mipi_i3c_hci_cmd_v1) 467 mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx); 468 kfree(dev_data); 469 } 470} 471 472static int i3c_hci_request_ibi(struct i3c_dev_desc *dev, 473 const struct i3c_ibi_setup *req) 474{ 475 struct i3c_master_controller *m = i3c_dev_get_master(dev); 476 struct i3c_hci *hci = to_i3c_hci(m); 477 struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev); 478 unsigned int dat_idx = dev_data->dat_idx; 479 480 if (req->max_payload_len != 0) 481 mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0); 482 else 483 mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0); 484 return hci->io->request_ibi(hci, dev, req); 485} 486 487static void i3c_hci_free_ibi(struct i3c_dev_desc *dev) 488{ 489 struct i3c_master_controller *m = i3c_dev_get_master(dev); 490 struct i3c_hci *hci = to_i3c_hci(m); 491 492 hci->io->free_ibi(hci, dev); 493} 494 495static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev) 496{ 497 struct i3c_master_controller *m = i3c_dev_get_master(dev); 498 struct i3c_hci *hci = to_i3c_hci(m); 499 struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev); 500 501 mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0); 502 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); 503} 504 505static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev) 506{ 507 struct i3c_master_controller *m = i3c_dev_get_master(dev); 508 struct i3c_hci *hci = to_i3c_hci(m); 509 struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev); 510 511 mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0); 512 return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); 513} 514 515static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev, 516 struct i3c_ibi_slot *slot) 517{ 518 struct i3c_master_controller *m = i3c_dev_get_master(dev); 519 struct i3c_hci *hci = to_i3c_hci(m); 520 521 hci->io->recycle_ibi_slot(hci, dev, slot); 522} 523 524static const struct i3c_master_controller_ops i3c_hci_ops = { 525 .bus_init = i3c_hci_bus_init, 526 .bus_cleanup = i3c_hci_bus_cleanup, 527 .do_daa = i3c_hci_daa, 528 .send_ccc_cmd = i3c_hci_send_ccc_cmd, 529 .priv_xfers = i3c_hci_priv_xfers, 530 .i2c_xfers = i3c_hci_i2c_xfers, 531 .attach_i3c_dev = i3c_hci_attach_i3c_dev, 532 .reattach_i3c_dev = i3c_hci_reattach_i3c_dev, 533 .detach_i3c_dev = i3c_hci_detach_i3c_dev, 534 .attach_i2c_dev = i3c_hci_attach_i2c_dev, 535 .detach_i2c_dev = i3c_hci_detach_i2c_dev, 536 .request_ibi = i3c_hci_request_ibi, 537 .free_ibi = i3c_hci_free_ibi, 538 .enable_ibi = i3c_hci_enable_ibi, 539 .disable_ibi = i3c_hci_disable_ibi, 540 .recycle_ibi_slot = i3c_hci_recycle_ibi_slot, 541}; 542 543static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) 544{ 545 struct i3c_hci *hci = dev_id; 546 irqreturn_t result = IRQ_NONE; 547 u32 val; 548 549 val = reg_read(INTR_STATUS); 550 DBG("INTR_STATUS = %#x", val); 551 552 if (val) { 553 reg_write(INTR_STATUS, val); 554 } else { 555 /* v1.0 does not have PIO cascaded notification bits */ 556 val |= INTR_HC_PIO; 557 } 558 559 if (val & INTR_HC_RESET_CANCEL) { 560 DBG("cancelled reset"); 561 val &= ~INTR_HC_RESET_CANCEL; 562 } 563 if (val & INTR_HC_INTERNAL_ERR) { 564 dev_err(&hci->master.dev, "Host Controller Internal Error\n"); 565 val &= ~INTR_HC_INTERNAL_ERR; 566 } 567 if (val & INTR_HC_PIO) { 568 hci->io->irq_handler(hci, 0); 569 val &= ~INTR_HC_PIO; 570 } 571 if (val & INTR_HC_RINGS) { 572 hci->io->irq_handler(hci, val & INTR_HC_RINGS); 573 val &= ~INTR_HC_RINGS; 574 } 575 if (val) 576 dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val); 577 else 578 result = IRQ_HANDLED; 579 580 return result; 581} 582 583static int i3c_hci_init(struct i3c_hci *hci) 584{ 585 u32 regval, offset; 586 int ret; 587 588 /* Validate HCI hardware version */ 589 regval = reg_read(HCI_VERSION); 590 hci->version_major = (regval >> 8) & 0xf; 591 hci->version_minor = (regval >> 4) & 0xf; 592 hci->revision = regval & 0xf; 593 dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n", 594 hci->version_major, hci->version_minor, hci->revision); 595 /* known versions */ 596 switch (regval & ~0xf) { 597 case 0x100: /* version 1.0 */ 598 case 0x110: /* version 1.1 */ 599 case 0x200: /* version 2.0 */ 600 break; 601 default: 602 dev_err(&hci->master.dev, "unsupported HCI version\n"); 603 return -EPROTONOSUPPORT; 604 } 605 606 hci->caps = reg_read(HC_CAPABILITIES); 607 DBG("caps = %#x", hci->caps); 608 609 regval = reg_read(DAT_SECTION); 610 offset = FIELD_GET(DAT_TABLE_OFFSET, regval); 611 hci->DAT_regs = offset ? hci->base_regs + offset : NULL; 612 hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval); 613 hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval); 614 dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n", 615 hci->DAT_entries, hci->DAT_entry_size * 4, offset); 616 617 regval = reg_read(DCT_SECTION); 618 offset = FIELD_GET(DCT_TABLE_OFFSET, regval); 619 hci->DCT_regs = offset ? hci->base_regs + offset : NULL; 620 hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval); 621 hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval); 622 dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n", 623 hci->DCT_entries, hci->DCT_entry_size * 4, offset); 624 625 regval = reg_read(RING_HEADERS_SECTION); 626 offset = FIELD_GET(RING_HEADERS_OFFSET, regval); 627 hci->RHS_regs = offset ? hci->base_regs + offset : NULL; 628 dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset); 629 630 regval = reg_read(PIO_SECTION); 631 offset = FIELD_GET(PIO_REGS_OFFSET, regval); 632 hci->PIO_regs = offset ? hci->base_regs + offset : NULL; 633 dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset); 634 635 regval = reg_read(EXT_CAPS_SECTION); 636 offset = FIELD_GET(EXT_CAPS_OFFSET, regval); 637 hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL; 638 dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset); 639 640 ret = i3c_hci_parse_ext_caps(hci); 641 if (ret) 642 return ret; 643 644 /* 645 * Now let's reset the hardware. 646 * SOFT_RST must be clear before we write to it. 647 * Then we must wait until it clears again. 648 */ 649 ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 650 !(regval & SOFT_RST), 1, 10000); 651 if (ret) 652 return -ENXIO; 653 reg_write(RESET_CONTROL, SOFT_RST); 654 ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 655 !(regval & SOFT_RST), 1, 10000); 656 if (ret) 657 return -ENXIO; 658 659 /* Disable all interrupts and allow all signal updates */ 660 reg_write(INTR_SIGNAL_ENABLE, 0x0); 661 reg_write(INTR_STATUS_ENABLE, 0xffffffff); 662 663 /* Make sure our data ordering fits the host's */ 664 regval = reg_read(HC_CONTROL); 665 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { 666 if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) { 667 regval |= HC_CONTROL_DATA_BIG_ENDIAN; 668 reg_write(HC_CONTROL, regval); 669 regval = reg_read(HC_CONTROL); 670 if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) { 671 dev_err(&hci->master.dev, "cannot set BE mode\n"); 672 return -EOPNOTSUPP; 673 } 674 } 675 } else { 676 if (regval & HC_CONTROL_DATA_BIG_ENDIAN) { 677 regval &= ~HC_CONTROL_DATA_BIG_ENDIAN; 678 reg_write(HC_CONTROL, regval); 679 regval = reg_read(HC_CONTROL); 680 if (regval & HC_CONTROL_DATA_BIG_ENDIAN) { 681 dev_err(&hci->master.dev, "cannot clear BE mode\n"); 682 return -EOPNOTSUPP; 683 } 684 } 685 } 686 687 /* Select our command descriptor model */ 688 switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) { 689 case 0: 690 hci->cmd = &mipi_i3c_hci_cmd_v1; 691 break; 692 case 1: 693 hci->cmd = &mipi_i3c_hci_cmd_v2; 694 break; 695 default: 696 dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n"); 697 return -EINVAL; 698 } 699 700 /* Try activating DMA operations first */ 701 if (hci->RHS_regs) { 702 reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE); 703 if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) { 704 dev_err(&hci->master.dev, "PIO mode is stuck\n"); 705 ret = -EIO; 706 } else { 707 hci->io = &mipi_i3c_hci_dma; 708 dev_info(&hci->master.dev, "Using DMA\n"); 709 } 710 } 711 712 /* If no DMA, try PIO */ 713 if (!hci->io && hci->PIO_regs) { 714 reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE); 715 if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) { 716 dev_err(&hci->master.dev, "DMA mode is stuck\n"); 717 ret = -EIO; 718 } else { 719 hci->io = &mipi_i3c_hci_pio; 720 dev_info(&hci->master.dev, "Using PIO\n"); 721 } 722 } 723 724 if (!hci->io) { 725 dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n"); 726 if (!ret) 727 ret = -EINVAL; 728 return ret; 729 } 730 731 return 0; 732} 733 734static int i3c_hci_probe(struct platform_device *pdev) 735{ 736 struct i3c_hci *hci; 737 int irq, ret; 738 739 hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL); 740 if (!hci) 741 return -ENOMEM; 742 hci->base_regs = devm_platform_ioremap_resource(pdev, 0); 743 if (IS_ERR(hci->base_regs)) 744 return PTR_ERR(hci->base_regs); 745 746 platform_set_drvdata(pdev, hci); 747 /* temporary for dev_printk's, to be replaced in i3c_master_register */ 748 hci->master.dev.init_name = dev_name(&pdev->dev); 749 750 ret = i3c_hci_init(hci); 751 if (ret) 752 return ret; 753 754 irq = platform_get_irq(pdev, 0); 755 ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler, 756 0, NULL, hci); 757 if (ret) 758 return ret; 759 760 ret = i3c_master_register(&hci->master, &pdev->dev, 761 &i3c_hci_ops, false); 762 if (ret) 763 return ret; 764 765 return 0; 766} 767 768static int i3c_hci_remove(struct platform_device *pdev) 769{ 770 struct i3c_hci *hci = platform_get_drvdata(pdev); 771 772 return i3c_master_unregister(&hci->master); 773} 774 775static const __maybe_unused struct of_device_id i3c_hci_of_match[] = { 776 { .compatible = "mipi-i3c-hci", }, 777 {}, 778}; 779MODULE_DEVICE_TABLE(of, i3c_hci_of_match); 780 781static struct platform_driver i3c_hci_driver = { 782 .probe = i3c_hci_probe, 783 .remove = i3c_hci_remove, 784 .driver = { 785 .name = "mipi-i3c-hci", 786 .of_match_table = of_match_ptr(i3c_hci_of_match), 787 }, 788}; 789module_platform_driver(i3c_hci_driver); 790 791MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>"); 792MODULE_DESCRIPTION("MIPI I3C HCI driver"); 793MODULE_LICENSE("Dual BSD/GPL");