adf_drv.c (7234B)
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c3xxx_hw_data.h"

static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
	{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_C3XXX_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};

static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}

static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	int i;

	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		switch (accel_pci_dev->pci_dev->device) {
		case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
			adf_clean_hw_data_c3xxx(accel_dev->hw_device);
			break;
		default:
			break;
		}
		kfree(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	adf_devmgr_rm_dev(accel_dev, NULL);
}

static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
		break;
	default:
		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
		return -ENODEV;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/* If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow. */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/* Add accel device to accel table.
	 * This should be called before adf_cleanup_accel is called */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		kfree(accel_dev);
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and configure device configuration structure */
	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
			       dev_to_node(&pdev->dev));
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_c3xxx(accel_dev->hw_device);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
			      &hw_data->fuses);
	pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
			      &hw_data->straps);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it. */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    ((~hw_data->ae_mask) & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create dev top level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
		 hw_data->dev_class->name, pci_name(pdev));

	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* enable PCI device */
	if (pci_enable_device(pdev)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* set dma identifier */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto out_err_disable;
	}

	if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
		ret = -EFAULT;
		goto out_err_disable;
	}

	/* Get accelerator capabilities mask */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);

	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);
		if (!bar->base_addr)
			break;
		bar->size = pci_resource_len(pdev, bar_nr);
		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
		if (!bar->virt_addr) {
			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
			ret = -EFAULT;
			goto out_err_free_reg;
		}
	}
	pci_set_master(pdev);

	adf_enable_aer(accel_dev);

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		ret = -ENOMEM;
		goto out_err_disable_aer;
	}

	ret = qat_crypto_dev_config(accel_dev);
	if (ret)
		goto out_err_disable_aer;

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_err_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_stop(accel_dev);
out_err_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err_disable_aer:
	adf_disable_aer(accel_dev);
out_err_free_reg:
	pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
	pci_disable_device(accel_pci_dev->pci_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	kfree(accel_dev);
	return ret;
}

static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	adf_disable_aer(accel_dev);
	adf_cleanup_accel(accel_dev);
	adf_cleanup_pci_dev(accel_dev);
	kfree(accel_dev);
}

static int __init adfdrv_init(void)
{
	request_module("intel_qat");

	if (pci_register_driver(&adf_driver)) {
		pr_err("QAT: Driver initialization failed\n");
		return -EFAULT;
	}
	return 0;
}

static void __exit adfdrv_release(void)
{
	pci_unregister_driver(&adf_driver);
}

module_init(adfdrv_init);
module_exit(adfdrv_release);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_C3XXX_FW);
MODULE_FIRMWARE(ADF_C3XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);