cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ahb.c (26590B)


// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include <linux/remoteproc.h>
#include "pcic.h"

static const struct of_device_id ath11k_ahb_of_match[] = {
	/* TODO: Should we change the compatible string to something similar
	 * to one that ath10k uses?
	 */
	{ .compatible = "qcom,ipq8074-wifi",
	  .data = (void *)ATH11K_HW_IPQ8074,
	},
	{ .compatible = "qcom,ipq6018-wifi",
	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
	},
	{ .compatible = "qcom,wcn6750-wifi",
	  .data = (void *)ATH11K_HW_WCN6750_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);

#define ATH11K_IRQ_CE0_OFFSET 4

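/* Interrupt names as requested via platform_get_irq_byname(). The first
 * four entries are miscellaneous/watchdog lines, CE interrupts start at
 * ATH11K_IRQ_CE0_OFFSET and the remaining entries cover the DP rings.
 */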
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

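/* WCN6750 is a hybrid bus device: it sits on AHB but reuses the PCIe
 * (pcic) MSI plumbing, so an MSI vector number is simply translated to
 * the platform IRQ collected in ath11k_ahb_setup_msi_resources().
 */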
static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.msi.irqs[vector];
}

static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
};

static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
	return ioread32(ab->mem + offset);
}

static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	iowrite32(value, ab->mem + offset);
}

static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_ahb_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath11k_ahb_read32(ab, offset);
	ath11k_ahb_write32(ab, offset, val | BIT(bit));
}

static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath11k_ahb_read32(ab, offset);
	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
}

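/* CE interrupts are controlled per copy engine: source rings use one bit
 * per CE in CE_HOST_IE_ADDRESS, destination rings use the same bit in
 * CE_HOST_IE_2_ADDRESS plus a bit shifted by CE_HOST_IE_3_SHIFT in
 * CE_HOST_IE_3_ADDRESS.
 */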
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    CE_HOST_IE_3_ADDRESS);
	}
}

static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      CE_HOST_IE_3_ADDRESS);
	}
}

static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j;
	int irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_ahb_ce_irq_enable(ab, i);
	}
}

static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_ahb_ce_irq_disable(ab, i);
	}
}

static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}

static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_ahb_ext_grp_enable(irq_grp);
	}
}

static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}

static void ath11k_ahb_stop(struct ath11k_base *ab)
{
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath11k_ahb_ce_irqs_disable(ab);
	ath11k_ahb_sync_ce_irqs(ab);
	ath11k_ahb_kill_tasklets(ab);
	del_timer_sync(&ab->rx_replenish_retry);
	ath11k_ce_cleanup_pipes(ab);
}

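/* The WLAN firmware runs on the Q6 remote processor; powering the target
 * up or down is delegated to the remoteproc framework.
 */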
static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	int ret;

	ret = rproc_boot(ab_ahb->tgt_rproc);
	if (ret)
		ath11k_err(ab, "failed to boot the remote processor Q6\n");

	return ret;
}

static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	rproc_shutdown(ab_ahb->tgt_rproc);
}

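/* After cold boot calibration completes (or times out) the target is
 * power cycled so that the firmware boots again in mission mode. On a
 * timeout ath11k_cold_boot_cal is cleared and only a warning is logged.
 */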
static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
{
	int timeout;

	if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
	    ab->hw_params.cold_boot_calib == 0)
		return 0;

	ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
				     (ab->qmi.cal_done  == 1),
				     ATH11K_COLD_BOOT_FW_RESET_DELAY);
	if (timeout <= 0) {
		ath11k_cold_boot_cal = 0;
		ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
	}

	/* reset the firmware */
	ath11k_ahb_power_down(ab);
	ath11k_ahb_power_up(ab);

	ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
	return 0;
}

static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}

static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
	}
}

static void ath11k_ahb_free_irq(struct ath11k_base *ab)
{
	int irq_idx;
	int i;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_free_irq(ab);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_ahb_free_ext_irq(ab);
}

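/* CE interrupt flow: the hard IRQ handler masks the CE's interrupt and
 * schedules a tasklet; the tasklet services the copy engine and then
 * re-enables the interrupt.
 */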
static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

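/* DP (external group) interrupt flow mirrors the CE path: the IRQ handler
 * disables the group's interrupts and schedules NAPI; the poll routine
 * services the SRNGs and re-enables the group once it stays under budget.
 */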
static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath11k_ahb_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath11k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

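/* Build each external interrupt group from the hw_params ring masks:
 * ring and MAC indices are translated into enum ext_irq_num entries,
 * and the matching named platform IRQs are then requested per group.
 */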
static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
	struct ath11k_hw_params *hw = &ab->hw_params;
	int i, j;
	int irq;
	int ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);

		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			if (j < ab->hw_params.max_radios) {
				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						rxdma2host_destination_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						host2rxdma_host_buf_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						ppdu_end_interrupts_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
					irq_grp->irqs[num_irq++] =
						rxdma2host_monitor_status_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}
			}
		}
		irq_grp->num_irq = num_irq;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
					  IRQF_TRIGGER_RISING,
					  irq_name[irq_idx], irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request_irq for %d\n",
					   irq);
			}
		}
	}

	return 0;
}

static int ath11k_ahb_config_irq(struct ath11k_base *ab)
{
	int irq, irq_idx, i;
	int ret;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_config_irq(ab);

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
				  ce_pipe);
		if (ret)
			return ret;

		ab->irq_num[irq_idx] = irq;
	}

	/* Configure external interrupts */
	ret = ath11k_ahb_config_ext_irq(ab);

	return ret;
}

static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
	.start = ath11k_ahb_start,
	.stop = ath11k_ahb_stop,
	.read32 = ath11k_ahb_read32,
	.write32 = ath11k_ahb_write32,
	.irq_enable = ath11k_ahb_ext_irq_enable,
	.irq_disable = ath11k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
};

static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
	.start = ath11k_pcic_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address =  ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
};

static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
	struct rproc *prproc;
	phandle rproc_phandle;

	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
		ath11k_err(ab, "failed to get q6_rproc handle\n");
		return -ENOENT;
	}

	prproc = rproc_get_by_phandle(rproc_phandle);
	if (!prproc) {
		ath11k_err(ab, "failed to get rproc\n");
		return -EINVAL;
	}
	ab_ahb->tgt_rproc = prproc;

	return 0;
}

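/* Hybrid bus (WCN6750) MSI setup: the MSI target address is taken from
 * the first MEM resource and mapped through the IOMMU, the base MSI data
 * is derived from the "interrupts" property, and the individual vectors
 * come from the platform IRQ resources.
 */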
static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	phys_addr_t msi_addr_pa;
	dma_addr_t msi_addr_iova;
	struct resource *res;
	int int_prop;
	int ret;
	int i;

	ret = ath11k_pcic_init_msi_config(ab);
	if (ret) {
		ath11k_err(ab, "failed to init msi config: %d\n", ret);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath11k_err(ab, "failed to fetch msi_addr\n");
		return -ENOENT;
	}

	msi_addr_pa = res->start;
	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
					 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(ab->dev, msi_addr_iova))
		return -ENOMEM;

	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);

	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
	if (ret)
		return ret;

	ab->pci.msi.ep_base_data = int_prop + 32;

	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res)
			return -ENODEV;

		ab->pci.msi.irqs[i] = res->start;
	}

	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);

	return 0;
}

static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	void __iomem *mem;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_ahb_setup_msi_resources(ab);

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(mem)) {
		dev_err(&pdev->dev, "ioremap error\n");
		return PTR_ERR(mem);
	}

	ab->mem = mem;
	ab->mem_len = resource_size(mem_res);

	return 0;
}

static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return -ENOENT;

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "failed to resolve msa fixed region\n");
		return ret;
	}

	ab_ahb->fw.msa_paddr = r.start;
	ab_ahb->fw.msa_size = resource_size(&r);

	node = of_parse_phandle(dev->of_node, "memory-region", 1);
	if (!node)
		return -ENOENT;

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "failed to resolve ce fixed region\n");
		return ret;
	}

	ab_ahb->fw.ce_paddr = r.start;
	ab_ahb->fw.ce_size = resource_size(&r);

	return 0;
}

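/* On platforms with fixed firmware memory (MSA), a "wifi-firmware" child
 * device is created and attached to its own IOMMU domain, and the MSA and
 * CE regions are identity mapped for it. Without such a node the firmware
 * memory is assumed to be managed by TrustZone (use_tz).
 */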
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *host_dev = ab->dev;
	struct platform_device_info info = {0};
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	/* Chipsets not requiring MSA need not initialize
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	ret = ath11k_ahb_setup_msa_resources(ab);
	if (ret) {
		ath11k_err(ab, "failed to setup msa resources\n");
		return ret;
	}

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		ab_ahb->fw.use_tz = true;
		return 0;
	}

	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath11k_err(ab, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ab_ahb->fw.dev = &pdev->dev;

	iommu_dom = iommu_domain_alloc(&platform_bus_type);
	if (!iommu_dom) {
		ath11k_err(ab, "failed to allocate iommu domain\n");
		ret = -ENOMEM;
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
	if (ret) {
		ath11k_err(ab, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
		goto err_iommu_unmap;
	}

	ab_ahb->fw.use_tz = false;
	ab_ahb->fw.iommu_domain = iommu_dom;
	of_node_put(node);

	return 0;

err_iommu_unmap:
	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);

err_iommu_detach:
	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}

static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct iommu_domain *iommu;
	size_t unmapped_size;

	if (ab_ahb->fw.use_tz)
		return 0;

	iommu = ab_ahb->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
	if (unmapped_size != ab_ahb->fw.msa_size)
		ath11k_err(ab, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
	if (unmapped_size != ab_ahb->fw.ce_size)
		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ab_ahb->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));

	return 0;
}

static int ath11k_ahb_probe(struct platform_device *pdev)
{
	struct ath11k_base *ab;
	const struct of_device_id *of_id;
	const struct ath11k_hif_ops *hif_ops;
	const struct ath11k_pci_ops *pci_ops;
	enum ath11k_hw_rev hw_rev;
	int ret;

	of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	hw_rev = (enum ath11k_hw_rev)of_id->data;

	switch (hw_rev) {
	case ATH11K_HW_IPQ8074:
	case ATH11K_HW_IPQ6018_HW10:
		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
		pci_ops = NULL;
		break;
	case ATH11K_HW_WCN6750_HW10:
		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
		break;
	default:
		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
		return -EOPNOTSUPP;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
		return ret;
	}

	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
			       ATH11K_BUS_AHB);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->hif.ops = hif_ops;
	ab->pci.ops = pci_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	platform_set_drvdata(pdev, ab);

	ret = ath11k_ahb_setup_resources(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_fw_resources_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_fw_deinit;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_ahb_init_qmi_ce_config(ab);

	ret = ath11k_core_get_rproc(ab);
	if (ret) {
		ath11k_err(ab, "failed to get rproc: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_ahb_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_ce_free;
	}

	ath11k_ahb_fwreset_from_cold_boot(ab);

	return 0;

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_fw_deinit:
	ath11k_ahb_fw_resource_deinit(ab);

err_core_free:
	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}

static int ath11k_ahb_remove(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);
	unsigned long left;

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_ahb_power_down(ab);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	reinit_completion(&ab->driver_recovery);

	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
		left = wait_for_completion_timeout(&ab->driver_recovery,
						   ATH11K_AHB_RECOVERY_TIMEOUT);
		if (!left)
			ath11k_warn(ab, "failed to receive recovery response completion\n");
	}

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
	cancel_work_sync(&ab->restart_work);

	ath11k_core_deinit(ab);
qmi_fail:
	ath11k_ahb_free_irq(ab);
	ath11k_hal_srng_deinit(ab);
	ath11k_ahb_fw_resource_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver ath11k_ahb_driver = {
	.driver         = {
		.name   = "ath11k",
		.of_match_table = ath11k_ahb_of_match,
	},
	.probe  = ath11k_ahb_probe,
	.remove = ath11k_ahb_remove,
};

static int ath11k_ahb_init(void)
{
	return platform_driver_register(&ath11k_ahb_driver);
}
module_init(ath11k_ahb_init);

static void ath11k_ahb_exit(void)
{
	platform_driver_unregister(&ath11k_ahb_driver);
}
module_exit(ath11k_ahb_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");