adf_ctl_drv.c (11265B)
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/crypto.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"

#define DEVICE_NAME "qat_adf_ctl"

static DEFINE_MUTEX(adf_ctl_lock);
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

static const struct file_operations adf_ctl_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = adf_ctl_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

struct adf_ctl_drv_info {
	unsigned int major;
	struct cdev drv_cdev;
	struct class *drv_class;
};

static struct adf_ctl_drv_info adf_ctl_drv;

static void adf_chr_drv_destroy(void)
{
	device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
	cdev_del(&adf_ctl_drv.drv_cdev);
	class_destroy(adf_ctl_drv.drv_class);
	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}

static int adf_chr_drv_create(void)
{
	dev_t dev_id;
	struct device *drv_device;

	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
		pr_err("QAT: unable to allocate chrdev region\n");
		return -EFAULT;
	}

	adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
	if (IS_ERR(adf_ctl_drv.drv_class)) {
		pr_err("QAT: class_create failed for adf_ctl\n");
		goto err_chrdev_unreg;
	}
	adf_ctl_drv.major = MAJOR(dev_id);
	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
		pr_err("QAT: cdev add failed\n");
		goto err_class_destr;
	}

	drv_device = device_create(adf_ctl_drv.drv_class, NULL,
				   MKDEV(adf_ctl_drv.major, 0),
				   NULL, DEVICE_NAME);
	if (IS_ERR(drv_device)) {
		pr_err("QAT: failed to create device\n");
		goto err_cdev_del;
	}
	return 0;
err_cdev_del:
	cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
	class_destroy(adf_ctl_drv.drv_class);
err_chrdev_unreg:
	unregister_chrdev_region(dev_id, 1);
	return -EFAULT;
}

static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   unsigned long arg)
{
	struct adf_user_cfg_ctl_data *cfg_data;

	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
	if (!cfg_data)
		return -ENOMEM;

	/* Initialize device id to NO DEVICE as 0 is a valid device id */
	cfg_data->device_id = ADF_CFG_NO_DEVICE;

	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
		pr_err("QAT: failed to copy from user cfg_data.\n");
		kfree(cfg_data);
		return -EIO;
	}

	*ctl_data = cfg_data;
	return 0;
}

static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
				  const char *section,
				  const struct adf_user_cfg_key_val *key_val)
{
	if (key_val->type == ADF_HEX) {
		long *ptr = (long *)key_val->val;
		long val = *ptr;

		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, (void *)val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add hex keyvalue.\n");
			return -EFAULT;
		}
	} else {
		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, key_val->val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add keyvalue.\n");
			return -EFAULT;
		}
	}
	return 0;
}

static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_key_val *params_head;
	struct adf_user_cfg_section section, *section_head;

	section_head = ctl_data->config_section;

	while (section_head) {
		if (copy_from_user(&section, (void __user *)section_head,
				   sizeof(*section_head))) {
			dev_err(&GET_DEV(accel_dev),
				"failed to copy section info\n");
			goto out_err;
		}

		if (adf_cfg_section_add(accel_dev, section.name)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add section.\n");
			goto out_err;
		}

		params_head = section.params;

		while (params_head) {
			if (copy_from_user(&key_val, (void __user *)params_head,
					   sizeof(key_val))) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to copy keyvalue.\n");
				goto out_err;
			}
			if (adf_add_key_value_data(accel_dev, section.name,
						   &key_val)) {
				goto out_err;
			}
			params_head = key_val.next;
		}
		section_head = section.next;
	}
	return 0;
out_err:
	adf_cfg_del_all(accel_dev);
	return -EFAULT;
}

static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_dev_started(accel_dev)) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
		ret = -EFAULT;
		goto out;
	}
	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
	kfree(ctl_data);
	return ret;
}

static int adf_ctl_is_device_in_use(int id)
{
	struct adf_accel_dev *dev;

	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
				dev_info(&GET_DEV(dev),
					 "device qat_dev%d is busy\n",
					 dev->accel_id);
				return -EBUSY;
			}
		}
	}
	return 0;
}

static void adf_ctl_stop_devices(u32 id)
{
	struct adf_accel_dev *accel_dev;

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			/* First stop all VFs */
			if (!accel_dev->is_vf)
				continue;

			adf_dev_stop(accel_dev);
			adf_dev_shutdown(accel_dev);
		}
	}

	/* Then stop the remaining (PF) devices */
	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			adf_dev_stop(accel_dev);
			adf_dev_shutdown(accel_dev);
		}
	}
}

static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
				  unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	if (adf_devmgr_verify_id(ctl_data->device_id)) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
	if (ret)
		goto out;

	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
		pr_info("QAT: Stopping all acceleration devices.\n");
	else
		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
			ctl_data->device_id);

	adf_ctl_stop_devices(ctl_data->device_id);

out:
	kfree(ctl_data);
	return ret;
}

static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
				   unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	ret = -ENODEV;
	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev)
		goto out;

	if (!adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev),
			 "Starting acceleration device qat_dev%d.\n",
			 ctl_data->device_id);
		ret = adf_dev_init(accel_dev);
		if (!ret)
			ret = adf_dev_start(accel_dev);
	} else {
		dev_info(&GET_DEV(accel_dev),
			 "Acceleration device qat_dev%d already started.\n",
			 ctl_data->device_id);
	}
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			ctl_data->device_id);
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}
out:
	kfree(ctl_data);
	return ret;
}

static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
					 unsigned long arg)
{
	u32 num_devices = 0;

	adf_devmgr_get_num_dev(&num_devices);
	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
		return -EFAULT;

	return 0;
}

static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info dev_info;
	struct adf_accel_dev *accel_dev;

	if (copy_from_user(&dev_info, (void __user *)arg,
			   sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy from user.\n");
		return -EFAULT;
	}

	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
	if (!accel_dev)
		return -ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info.num_ae = hw_data->get_num_aes(hw_data);
	dev_info.num_accel = hw_data->get_num_accels(hw_data);
	dev_info.num_logical_accel = hw_data->num_logical_accel;
	dev_info.banks_per_accel = hw_data->num_banks
					/ hw_data->num_logical_accel;
	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
	dev_info.instance_id = hw_data->instance_id;
	dev_info.type = hw_data->dev_class->type;
	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

	if (copy_to_user((void __user *)arg, &dev_info,
			 sizeof(struct adf_dev_status_info))) {
		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
		return -EFAULT;
	}
	return 0;
}

/* All ioctl commands are serialized by adf_ctl_lock. */
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int ret;

	if (mutex_lock_interruptible(&adf_ctl_lock))
		return -EFAULT;

	switch (cmd) {
	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
		break;

	case IOCTL_STOP_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
		break;

	case IOCTL_START_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
		break;

	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
		break;

	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
		break;
	default:
		pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
		ret = -EFAULT;
		break;
	}
	mutex_unlock(&adf_ctl_lock);
	return ret;
}

static int __init adf_register_ctl_device_driver(void)
{
	if (adf_chr_drv_create())
		goto err_chr_dev;

	if (adf_init_misc_wq())
		goto err_misc_wq;

	if (adf_init_aer())
		goto err_aer;

	if (adf_init_pf_wq())
		goto err_pf_wq;

	if (adf_init_vf_wq())
		goto err_vf_wq;

	if (qat_crypto_register())
		goto err_crypto_register;

	return 0;

err_crypto_register:
	adf_exit_vf_wq();
err_vf_wq:
	adf_exit_pf_wq();
err_pf_wq:
	adf_exit_aer();
err_aer:
	adf_exit_misc_wq();
err_misc_wq:
	adf_chr_drv_destroy();
err_chr_dev:
	mutex_destroy(&adf_ctl_lock);
	return -EFAULT;
}

static void __exit adf_unregister_ctl_device_driver(void)
{
	adf_chr_drv_destroy();
	adf_exit_misc_wq();
	adf_exit_aer();
	adf_exit_vf_wq();
	adf_exit_pf_wq();
	qat_crypto_unregister();
	adf_clean_vf_map(false);
	mutex_destroy(&adf_ctl_lock);
}

module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
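
A minimal userspace sketch of how the control node created by adf_chr_drv_create() could be exercised follows. It is illustrative only and not part of the driver: it assumes the driver's adf_cfg_common.h (which defines the IOCTL_* commands, struct adf_dev_status_info, and the DEV_UP/DEV_DOWN states) can be included from a userspace build, and that udev has created the class device as /dev/qat_adf_ctl.

/* Hypothetical userspace example -- not part of adf_ctl_drv.c. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "adf_cfg_common.h"	/* IOCTL_* commands, struct adf_dev_status_info */

int main(void)
{
	struct adf_dev_status_info info = { .accel_id = 0 };
	uint32_t num_devices = 0;
	int fd;

	fd = open("/dev/qat_adf_ctl", O_RDWR);
	if (fd < 0) {
		perror("open /dev/qat_adf_ctl");
		return 1;
	}

	/* Served by adf_ctl_ioctl_get_num_devices(): copies a u32 back. */
	if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) < 0) {
		perror("IOCTL_GET_NUM_DEVICES");
		close(fd);
		return 1;
	}
	printf("QAT devices present: %u\n", num_devices);

	/* Served by adf_ctl_ioctl_get_status(): fills in the same struct. */
	if (num_devices && ioctl(fd, IOCTL_STATUS_ACCEL_DEV, &info) == 0)
		printf("qat_dev0: %s at %02x:%02x.%x\n",
		       info.state == DEV_UP ? "up" : "down",
		       info.bus, info.dev, info.fun);

	close(fd);
	return 0;
}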