cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ap_bus.c (57810B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * Copyright IBM Corp. 2006, 2021
      4 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
      5 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
      6 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
      7 *	      Felix Beck <felix.beck@de.ibm.com>
      8 *	      Holger Dengler <hd@linux.vnet.ibm.com>
      9 *	      Harald Freudenberger <freude@linux.ibm.com>
     10 *
     11 * Adjunct processor bus.
     12 */
     13
     14#define KMSG_COMPONENT "ap"
     15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
     16
     17#include <linux/kernel_stat.h>
     18#include <linux/moduleparam.h>
     19#include <linux/init.h>
     20#include <linux/delay.h>
     21#include <linux/err.h>
     22#include <linux/freezer.h>
     23#include <linux/interrupt.h>
     24#include <linux/workqueue.h>
     25#include <linux/slab.h>
     26#include <linux/notifier.h>
     27#include <linux/kthread.h>
     28#include <linux/mutex.h>
     29#include <asm/airq.h>
     30#include <linux/atomic.h>
     31#include <asm/isc.h>
     32#include <linux/hrtimer.h>
     33#include <linux/ktime.h>
     34#include <asm/facility.h>
     35#include <linux/crypto.h>
     36#include <linux/mod_devicetable.h>
     37#include <linux/debugfs.h>
     38#include <linux/ctype.h>
     39#include <linux/module.h>
     40
     41#include "ap_bus.h"
     42#include "ap_debug.h"
     43
     44/*
     45 * Module parameters; note though this file itself isn't modular.
     46 */
     47int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
     48static DEFINE_SPINLOCK(ap_domain_lock);
     49module_param_named(domain, ap_domain_index, int, 0440);
     50MODULE_PARM_DESC(domain, "domain index for ap devices");
     51EXPORT_SYMBOL(ap_domain_index);
     52
     53static int ap_thread_flag;
     54module_param_named(poll_thread, ap_thread_flag, int, 0440);
     55MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
     56
     57static char *apm_str;
     58module_param_named(apmask, apm_str, charp, 0440);
     59MODULE_PARM_DESC(apmask, "AP bus adapter mask.");
     60
     61static char *aqm_str;
     62module_param_named(aqmask, aqm_str, charp, 0440);
     63MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
     64
     65static int ap_useirq = 1;
     66module_param_named(useirq, ap_useirq, int, 0440);
     67MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");
     68
     69atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
     70EXPORT_SYMBOL(ap_max_msg_size);
     71
     72static struct device *ap_root_device;
     73
     74/* Hashtable of all queue devices on the AP bus */
     75DEFINE_HASHTABLE(ap_queues, 8);
     76/* lock used for the ap_queues hashtable */
     77DEFINE_SPINLOCK(ap_queues_lock);
     78
     79/* Default permissions (ioctl, card and domain masking) */
     80struct ap_perms ap_perms;
     81EXPORT_SYMBOL(ap_perms);
     82DEFINE_MUTEX(ap_perms_mutex);
     83EXPORT_SYMBOL(ap_perms_mutex);
     84
     85/* # of bus scans since init */
     86static atomic64_t ap_scan_bus_count;
     87
     88/* # of bindings complete since init */
     89static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
     90
     91/* completion for initial APQN bindings complete */
     92static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
     93
     94static struct ap_config_info *ap_qci_info;
     95static struct ap_config_info *ap_qci_info_old;
     96
     97/*
     98 * AP bus related debug feature things.
     99 */
    100debug_info_t *ap_dbf_info;
    101
    102/*
    103 * Workqueue timer for bus rescan.
    104 */
    105static struct timer_list ap_config_timer;
    106static int ap_config_time = AP_CONFIG_TIME;
    107static void ap_scan_bus(struct work_struct *);
    108static DECLARE_WORK(ap_scan_work, ap_scan_bus);
    109
    110/*
    111 * Tasklet & timer for AP request polling and interrupts
    112 */
    113static void ap_tasklet_fn(unsigned long);
    114static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
    115static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
    116static struct task_struct *ap_poll_kthread;
    117static DEFINE_MUTEX(ap_poll_thread_mutex);
    118static DEFINE_SPINLOCK(ap_poll_timer_lock);
    119static struct hrtimer ap_poll_timer;
    120/*
     121 * In LPAR, poll with 4 kHz frequency, i.e. every 250000 nanoseconds.
     122 * Under z/VM, change to 1500000 nanoseconds to match z/VM's polling rate.
    123 */
    124static unsigned long long poll_timeout = 250000;
    125
    126/* Maximum domain id, if not given via qci */
    127static int ap_max_domain_id = 15;
    128/* Maximum adapter id, if not given via qci */
    129static int ap_max_adapter_id = 63;
    130
    131static struct bus_type ap_bus_type;
    132
    133/* Adapter interrupt definitions */
    134static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
    135
    136static bool ap_irq_flag;
    137
    138static struct airq_struct ap_airq = {
    139	.handler = ap_interrupt_handler,
    140	.isc = AP_ISC,
    141};
    142
    143/**
    144 * ap_airq_ptr() - Get the address of the adapter interrupt indicator
    145 *
    146 * Returns the address of the local-summary-indicator of the adapter
    147 * interrupt handler for AP, or NULL if adapter interrupts are not
    148 * available.
    149 */
    150void *ap_airq_ptr(void)
    151{
    152	if (ap_irq_flag)
    153		return ap_airq.lsi_ptr;
    154	return NULL;
    155}
    156
    157/**
    158 * ap_interrupts_available(): Test if AP interrupts are available.
    159 *
    160 * Returns 1 if AP interrupts are available.
    161 */
    162static int ap_interrupts_available(void)
    163{
    164	return test_facility(65);
    165}
    166
    167/**
    168 * ap_qci_available(): Test if AP configuration
    169 * information can be queried via QCI subfunction.
    170 *
    171 * Returns 1 if subfunction PQAP(QCI) is available.
    172 */
    173static int ap_qci_available(void)
    174{
    175	return test_facility(12);
    176}
    177
    178/**
    179 * ap_apft_available(): Test if AP facilities test (APFT)
    180 * facility is available.
    181 *
    182 * Returns 1 if APFT is available.
    183 */
    184static int ap_apft_available(void)
    185{
    186	return test_facility(15);
    187}
    188
    189/*
    190 * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
    191 *
    192 * Returns 1 if the QACT subfunction is available.
    193 */
    194static inline int ap_qact_available(void)
    195{
    196	if (ap_qci_info)
    197		return ap_qci_info->qact;
    198	return 0;
    199}
    200
    201/*
    202 * ap_fetch_qci_info(): Fetch cryptographic config info
    203 *
     204 * Fetches the AP configuration info via PQAP(QCI) into the given struct.
     205 * On success 0 is returned; on failure a negative errno
     206 * is returned, e.g. -EOPNOTSUPP if the PQAP(QCI)
     207 * instruction is not available.
    208 */
    209static inline int ap_fetch_qci_info(struct ap_config_info *info)
    210{
    211	if (!ap_qci_available())
    212		return -EOPNOTSUPP;
    213	if (!info)
    214		return -EINVAL;
    215	return ap_qci(info);
    216}
    217
    218/**
    219 * ap_init_qci_info(): Allocate and query qci config info.
     220 * Also updates the static variables ap_max_domain_id
    221 * and ap_max_adapter_id if this info is available.
    222 */
    223static void __init ap_init_qci_info(void)
    224{
    225	if (!ap_qci_available()) {
    226		AP_DBF_INFO("%s QCI not supported\n", __func__);
    227		return;
    228	}
    229
    230	ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
    231	if (!ap_qci_info)
    232		return;
    233	ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
    234	if (!ap_qci_info_old)
    235		return;
    236	if (ap_fetch_qci_info(ap_qci_info) != 0) {
    237		kfree(ap_qci_info);
    238		kfree(ap_qci_info_old);
    239		ap_qci_info = NULL;
    240		ap_qci_info_old = NULL;
    241		return;
    242	}
     243	AP_DBF_INFO("%s successfully fetched initial qci info\n", __func__);
    244
    245	if (ap_qci_info->apxa) {
    246		if (ap_qci_info->Na) {
    247			ap_max_adapter_id = ap_qci_info->Na;
    248			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
    249				    __func__, ap_max_adapter_id);
    250		}
    251		if (ap_qci_info->Nd) {
    252			ap_max_domain_id = ap_qci_info->Nd;
    253			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
    254				    __func__, ap_max_domain_id);
    255		}
    256	}
    257
    258	memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
    259}
    260
    261/*
     262 * ap_test_config(): helper function to extract the nr'th bit
    263 *		     within the unsigned int array field.
    264 */
    265static inline int ap_test_config(unsigned int *field, unsigned int nr)
    266{
    267	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
    268}
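/*
 * Editor's worked example (not part of the original source), assuming
 * ap_test_bit() in ap_bus.h tests bits MSB-first within a 32-bit word:
 * for nr = 37, the word field[37 >> 5] = field[1] is selected and bit
 * 37 & 0x1f = 5 within it is tested, i.e. mask 0x80000000 >> 5 = 0x04000000.
 */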
    269
    270/*
     271 * ap_test_config_card_id(): Test whether an AP card ID is configured.
    272 *
    273 * Returns 0 if the card is not configured
    274 *	   1 if the card is configured or
    275 *	     if the configuration information is not available
    276 */
    277static inline int ap_test_config_card_id(unsigned int id)
    278{
    279	if (id > ap_max_adapter_id)
    280		return 0;
    281	if (ap_qci_info)
    282		return ap_test_config(ap_qci_info->apm, id);
    283	return 1;
    284}
    285
    286/*
     287 * ap_test_config_usage_domain(): Test whether an AP usage domain
    288 * is configured.
    289 *
    290 * Returns 0 if the usage domain is not configured
    291 *	   1 if the usage domain is configured or
    292 *	     if the configuration information is not available
    293 */
    294int ap_test_config_usage_domain(unsigned int domain)
    295{
    296	if (domain > ap_max_domain_id)
    297		return 0;
    298	if (ap_qci_info)
    299		return ap_test_config(ap_qci_info->aqm, domain);
    300	return 1;
    301}
    302EXPORT_SYMBOL(ap_test_config_usage_domain);
    303
    304/*
     305 * ap_test_config_ctrl_domain(): Test whether an AP control domain
    306 * is configured.
    307 * @domain AP control domain ID
    308 *
    309 * Returns 1 if the control domain is configured
    310 *	   0 in all other cases
    311 */
    312int ap_test_config_ctrl_domain(unsigned int domain)
    313{
    314	if (!ap_qci_info || domain > ap_max_domain_id)
    315		return 0;
    316	return ap_test_config(ap_qci_info->adm, domain);
    317}
    318EXPORT_SYMBOL(ap_test_config_ctrl_domain);
    319
    320/*
    321 * ap_queue_info(): Check and get AP queue info.
    322 * Returns true if TAPQ succeeded and the info is filled or
    323 * false otherwise.
    324 */
    325static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
    326			  int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
    327{
    328	struct ap_queue_status status;
    329	union {
    330		unsigned long value;
    331		struct {
    332			unsigned int fac   : 32; /* facility bits */
    333			unsigned int at	   :  8; /* ap type */
    334			unsigned int _res1 :  8;
    335			unsigned int _res2 :  4;
    336			unsigned int ml	   :  4; /* apxl ml */
    337			unsigned int _res3 :  4;
    338			unsigned int qd	   :  4; /* queue depth */
    339		} tapq_gr2;
    340	} tapq_info;
    341
    342	tapq_info.value = 0;
    343
     344	/* make sure we don't run into a specification exception */
    345	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
    346	    AP_QID_QUEUE(qid) > ap_max_domain_id)
    347		return false;
    348
    349	/* call TAPQ on this APQN */
    350	status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
    351	switch (status.response_code) {
    352	case AP_RESPONSE_NORMAL:
    353	case AP_RESPONSE_RESET_IN_PROGRESS:
    354	case AP_RESPONSE_DECONFIGURED:
    355	case AP_RESPONSE_CHECKSTOPPED:
    356	case AP_RESPONSE_BUSY:
    357		/*
     358		 * According to the architecture, the info is filled in all
     359		 * these cases. An all-zero value is not possible, as at
     360		 * least one of the mode bits is always set.
    361		 */
    362		if (WARN_ON_ONCE(!tapq_info.value))
    363			return false;
    364		*q_type = tapq_info.tapq_gr2.at;
    365		*q_fac = tapq_info.tapq_gr2.fac;
    366		*q_depth = tapq_info.tapq_gr2.qd;
    367		*q_ml = tapq_info.tapq_gr2.ml;
    368		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
    369		*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
    370		switch (*q_type) {
     371			/* For CEX2 and CEX3 the available functions
     372			 * are not reflected by the facility bits;
     373			 * instead they are implied by the card type, so
     374			 * adjust the function bits based on the type.
    375			 */
    376		case AP_DEVICE_TYPE_CEX2A:
    377		case AP_DEVICE_TYPE_CEX3A:
    378			*q_fac |= 0x08000000;
    379			break;
    380		case AP_DEVICE_TYPE_CEX2C:
    381		case AP_DEVICE_TYPE_CEX3C:
    382			*q_fac |= 0x10000000;
    383			break;
    384		default:
    385			break;
    386		}
    387		return true;
    388	default:
    389		/*
     390		 * A response code which indicates that there is no info available.
    391		 */
    392		return false;
    393	}
    394}
    395
    396void ap_wait(enum ap_sm_wait wait)
    397{
    398	ktime_t hr_time;
    399
    400	switch (wait) {
    401	case AP_SM_WAIT_AGAIN:
    402	case AP_SM_WAIT_INTERRUPT:
    403		if (ap_irq_flag)
    404			break;
    405		if (ap_poll_kthread) {
    406			wake_up(&ap_poll_wait);
    407			break;
    408		}
    409		fallthrough;
    410	case AP_SM_WAIT_TIMEOUT:
    411		spin_lock_bh(&ap_poll_timer_lock);
    412		if (!hrtimer_is_queued(&ap_poll_timer)) {
    413			hr_time = poll_timeout;
    414			hrtimer_forward_now(&ap_poll_timer, hr_time);
    415			hrtimer_restart(&ap_poll_timer);
    416		}
    417		spin_unlock_bh(&ap_poll_timer_lock);
    418		break;
    419	case AP_SM_WAIT_NONE:
    420	default:
    421		break;
    422	}
    423}
    424
    425/**
    426 * ap_request_timeout(): Handling of request timeouts
    427 * @t: timer making this callback
    428 *
    429 * Handles request timeouts.
    430 */
    431void ap_request_timeout(struct timer_list *t)
    432{
    433	struct ap_queue *aq = from_timer(aq, t, timeout);
    434
    435	spin_lock_bh(&aq->lock);
    436	ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
    437	spin_unlock_bh(&aq->lock);
    438}
    439
    440/**
    441 * ap_poll_timeout(): AP receive polling for finished AP requests.
    442 * @unused: Unused pointer.
    443 *
    444 * Schedules the AP tasklet using a high resolution timer.
    445 */
    446static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
    447{
    448	tasklet_schedule(&ap_tasklet);
    449	return HRTIMER_NORESTART;
    450}
    451
    452/**
    453 * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
    454 * @airq: pointer to adapter interrupt descriptor
    455 * @floating: ignored
    456 */
    457static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
    458{
    459	inc_irq_stat(IRQIO_APB);
    460	tasklet_schedule(&ap_tasklet);
    461}
    462
    463/**
    464 * ap_tasklet_fn(): Tasklet to poll all AP devices.
    465 * @dummy: Unused variable
    466 *
    467 * Poll all AP devices on the bus.
    468 */
    469static void ap_tasklet_fn(unsigned long dummy)
    470{
    471	int bkt;
    472	struct ap_queue *aq;
    473	enum ap_sm_wait wait = AP_SM_WAIT_NONE;
    474
     475	/* Reset the indicator if interrupts are used so that new interrupts
     476	 * can be received. Doing this at the beginning of the tasklet ensures
     477	 * that no requests on any AP get lost.
    478	 */
    479	if (ap_irq_flag)
    480		xchg(ap_airq.lsi_ptr, 0);
    481
    482	spin_lock_bh(&ap_queues_lock);
    483	hash_for_each(ap_queues, bkt, aq, hnode) {
    484		spin_lock_bh(&aq->lock);
    485		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
    486		spin_unlock_bh(&aq->lock);
    487	}
    488	spin_unlock_bh(&ap_queues_lock);
    489
    490	ap_wait(wait);
    491}
    492
    493static int ap_pending_requests(void)
    494{
    495	int bkt;
    496	struct ap_queue *aq;
    497
    498	spin_lock_bh(&ap_queues_lock);
    499	hash_for_each(ap_queues, bkt, aq, hnode) {
    500		if (aq->queue_count == 0)
    501			continue;
    502		spin_unlock_bh(&ap_queues_lock);
    503		return 1;
    504	}
    505	spin_unlock_bh(&ap_queues_lock);
    506	return 0;
    507}
    508
    509/**
    510 * ap_poll_thread(): Thread that polls for finished requests.
    511 * @data: Unused pointer
    512 *
    513 * AP bus poll thread. The purpose of this thread is to poll for
    514 * finished requests in a loop if there is a "free" cpu - that is
    515 * a cpu that doesn't have anything better to do. The polling stops
    516 * as soon as there is another task or if all messages have been
    517 * delivered.
    518 */
    519static int ap_poll_thread(void *data)
    520{
    521	DECLARE_WAITQUEUE(wait, current);
    522
    523	set_user_nice(current, MAX_NICE);
    524	set_freezable();
    525	while (!kthread_should_stop()) {
    526		add_wait_queue(&ap_poll_wait, &wait);
    527		set_current_state(TASK_INTERRUPTIBLE);
    528		if (!ap_pending_requests()) {
    529			schedule();
    530			try_to_freeze();
    531		}
    532		set_current_state(TASK_RUNNING);
    533		remove_wait_queue(&ap_poll_wait, &wait);
    534		if (need_resched()) {
    535			schedule();
    536			try_to_freeze();
    537			continue;
    538		}
    539		ap_tasklet_fn(0);
    540	}
    541
    542	return 0;
    543}
    544
    545static int ap_poll_thread_start(void)
    546{
    547	int rc;
    548
    549	if (ap_irq_flag || ap_poll_kthread)
    550		return 0;
    551	mutex_lock(&ap_poll_thread_mutex);
    552	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
    553	rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
    554	if (rc)
    555		ap_poll_kthread = NULL;
    556	mutex_unlock(&ap_poll_thread_mutex);
    557	return rc;
    558}
    559
    560static void ap_poll_thread_stop(void)
    561{
    562	if (!ap_poll_kthread)
    563		return;
    564	mutex_lock(&ap_poll_thread_mutex);
    565	kthread_stop(ap_poll_kthread);
    566	ap_poll_kthread = NULL;
    567	mutex_unlock(&ap_poll_thread_mutex);
    568}
    569
    570#define is_card_dev(x) ((x)->parent == ap_root_device)
    571#define is_queue_dev(x) ((x)->parent != ap_root_device)
    572
    573/**
    574 * ap_bus_match()
    575 * @dev: Pointer to device
    576 * @drv: Pointer to device_driver
    577 *
     578 * AP bus device/driver matching: check whether the driver supports this device.
    579 */
    580static int ap_bus_match(struct device *dev, struct device_driver *drv)
    581{
    582	struct ap_driver *ap_drv = to_ap_drv(drv);
    583	struct ap_device_id *id;
    584
    585	/*
    586	 * Compare device type of the device with the list of
    587	 * supported types of the device_driver.
    588	 */
    589	for (id = ap_drv->ids; id->match_flags; id++) {
    590		if (is_card_dev(dev) &&
    591		    id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
    592		    id->dev_type == to_ap_dev(dev)->device_type)
    593			return 1;
    594		if (is_queue_dev(dev) &&
    595		    id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
    596		    id->dev_type == to_ap_dev(dev)->device_type)
    597			return 1;
    598	}
    599	return 0;
    600}
    601
    602/**
    603 * ap_uevent(): Uevent function for AP devices.
    604 * @dev: Pointer to device
    605 * @env: Pointer to kobj_uevent_env
    606 *
     607 * Sets up the environment variables DEV_TYPE and MODALIAS for card
     608 * devices and a MODE variable (accel, cca or ep11) for card and queue devices.
    609 */
    610static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
    611{
    612	int rc = 0;
    613	struct ap_device *ap_dev = to_ap_dev(dev);
    614
    615	/* Uevents from ap bus core don't need extensions to the env */
    616	if (dev == ap_root_device)
    617		return 0;
    618
    619	if (is_card_dev(dev)) {
    620		struct ap_card *ac = to_ap_card(&ap_dev->device);
    621
    622		/* Set up DEV_TYPE environment variable. */
    623		rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
    624		if (rc)
    625			return rc;
    626		/* Add MODALIAS= */
    627		rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
    628		if (rc)
    629			return rc;
    630
    631		/* Add MODE=<accel|cca|ep11> */
    632		if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
    633			rc = add_uevent_var(env, "MODE=accel");
    634		else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
    635			rc = add_uevent_var(env, "MODE=cca");
    636		else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
    637			rc = add_uevent_var(env, "MODE=ep11");
    638		if (rc)
    639			return rc;
    640	} else {
    641		struct ap_queue *aq = to_ap_queue(&ap_dev->device);
    642
    643		/* Add MODE=<accel|cca|ep11> */
    644		if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
    645			rc = add_uevent_var(env, "MODE=accel");
    646		else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
    647			rc = add_uevent_var(env, "MODE=cca");
    648		else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
    649			rc = add_uevent_var(env, "MODE=ep11");
    650		if (rc)
    651			return rc;
    652	}
    653
    654	return 0;
    655}
    656
    657static void ap_send_init_scan_done_uevent(void)
    658{
    659	char *envp[] = { "INITSCAN=done", NULL };
    660
    661	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
    662}
    663
    664static void ap_send_bindings_complete_uevent(void)
    665{
    666	char buf[32];
    667	char *envp[] = { "BINDINGS=complete", buf, NULL };
    668
    669	snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
    670		 atomic64_inc_return(&ap_bindings_complete_count));
    671	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
    672}
    673
    674void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
    675{
    676	char buf[16];
    677	char *envp[] = { buf, NULL };
    678
    679	snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
    680
    681	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
    682}
    683EXPORT_SYMBOL(ap_send_config_uevent);
    684
    685void ap_send_online_uevent(struct ap_device *ap_dev, int online)
    686{
    687	char buf[16];
    688	char *envp[] = { buf, NULL };
    689
    690	snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);
    691
    692	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
    693}
    694EXPORT_SYMBOL(ap_send_online_uevent);
    695
    696static void ap_send_mask_changed_uevent(unsigned long *newapm,
    697					unsigned long *newaqm)
    698{
    699	char buf[100];
    700	char *envp[] = { buf, NULL };
    701
    702	if (newapm)
    703		snprintf(buf, sizeof(buf),
    704			 "APMASK=0x%016lx%016lx%016lx%016lx\n",
    705			 newapm[0], newapm[1], newapm[2], newapm[3]);
    706	else
    707		snprintf(buf, sizeof(buf),
    708			 "AQMASK=0x%016lx%016lx%016lx%016lx\n",
    709			 newaqm[0], newaqm[1], newaqm[2], newaqm[3]);
    710
    711	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
    712}
    713
    714/*
    715 * calc # of bound APQNs
    716 */
    717
    718struct __ap_calc_ctrs {
    719	unsigned int apqns;
    720	unsigned int bound;
    721};
    722
    723static int __ap_calc_helper(struct device *dev, void *arg)
    724{
    725	struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
    726
    727	if (is_queue_dev(dev)) {
    728		pctrs->apqns++;
    729		if (dev->driver)
    730			pctrs->bound++;
    731	}
    732
    733	return 0;
    734}
    735
    736static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
    737{
    738	struct __ap_calc_ctrs ctrs;
    739
    740	memset(&ctrs, 0, sizeof(ctrs));
    741	bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
    742
    743	*apqns = ctrs.apqns;
    744	*bound = ctrs.bound;
    745}
    746
    747/*
     748 * After the initial ap bus scan, check whether all existing APQNs
     749 * are bound to device drivers.
    750 */
    751static void ap_check_bindings_complete(void)
    752{
    753	unsigned int apqns, bound;
    754
    755	if (atomic64_read(&ap_scan_bus_count) >= 1) {
    756		ap_calc_bound_apqns(&apqns, &bound);
    757		if (bound == apqns) {
    758			if (!completion_done(&ap_init_apqn_bindings_complete)) {
    759				complete_all(&ap_init_apqn_bindings_complete);
    760				AP_DBF_INFO("%s complete\n", __func__);
    761			}
    762			ap_send_bindings_complete_uevent();
    763		}
    764	}
    765}
    766
    767/*
     768 * Interface to wait until the AP bus has completed one initial bus
     769 * scan and all detected APQNs have been bound to device drivers.
     770 * If both conditions are not yet fulfilled, this function blocks
     771 * on a condition with wait_for_completion_interruptible_timeout().
     772 * If both conditions are fulfilled (before the timeout hits),
     773 * the return value is 0. If the timeout (in jiffies) hits instead,
     774 * -ETIME is returned. On failure a negative return value is
     775 * passed back to the caller.
    776 */
    777int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
    778{
    779	long l;
    780
    781	if (completion_done(&ap_init_apqn_bindings_complete))
    782		return 0;
    783
    784	if (timeout)
    785		l = wait_for_completion_interruptible_timeout(
    786			&ap_init_apqn_bindings_complete, timeout);
    787	else
    788		l = wait_for_completion_interruptible(
    789			&ap_init_apqn_bindings_complete);
    790	if (l < 0)
    791		return l == -ERESTARTSYS ? -EINTR : l;
    792	else if (l == 0 && timeout)
    793		return -ETIME;
    794
    795	return 0;
    796}
    797EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
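/*
 * Editor's usage sketch (not part of the original source): a hypothetical
 * consumer module that needs the initially detected APQNs bound before
 * starting its own work might wait up to 60 seconds like this:
 *
 *	if (ap_wait_init_apqn_bindings_complete(60 * HZ))
 *		pr_warn("AP bindings not complete, continuing anyway\n");
 */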
    798
    799static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
    800{
    801	if (is_queue_dev(dev) &&
    802	    AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
    803		device_unregister(dev);
    804	return 0;
    805}
    806
    807static int __ap_revise_reserved(struct device *dev, void *dummy)
    808{
    809	int rc, card, queue, devres, drvres;
    810
    811	if (is_queue_dev(dev)) {
    812		card = AP_QID_CARD(to_ap_queue(dev)->qid);
    813		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
    814		mutex_lock(&ap_perms_mutex);
    815		devres = test_bit_inv(card, ap_perms.apm) &&
    816			test_bit_inv(queue, ap_perms.aqm);
    817		mutex_unlock(&ap_perms_mutex);
    818		drvres = to_ap_drv(dev->driver)->flags
    819			& AP_DRIVER_FLAG_DEFAULT;
    820		if (!!devres != !!drvres) {
    821			AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
    822				   __func__, card, queue);
    823			rc = device_reprobe(dev);
    824			if (rc)
    825				AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
    826					    __func__, card, queue);
    827		}
    828	}
    829
    830	return 0;
    831}
    832
    833static void ap_bus_revise_bindings(void)
    834{
    835	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
    836}
    837
    838int ap_owned_by_def_drv(int card, int queue)
    839{
    840	int rc = 0;
    841
    842	if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
    843		return -EINVAL;
    844
    845	mutex_lock(&ap_perms_mutex);
    846
    847	if (test_bit_inv(card, ap_perms.apm) &&
    848	    test_bit_inv(queue, ap_perms.aqm))
    849		rc = 1;
    850
    851	mutex_unlock(&ap_perms_mutex);
    852
    853	return rc;
    854}
    855EXPORT_SYMBOL(ap_owned_by_def_drv);
    856
    857int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
    858				       unsigned long *aqm)
    859{
    860	int card, queue, rc = 0;
    861
    862	mutex_lock(&ap_perms_mutex);
    863
    864	for (card = 0; !rc && card < AP_DEVICES; card++)
    865		if (test_bit_inv(card, apm) &&
    866		    test_bit_inv(card, ap_perms.apm))
    867			for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
    868				if (test_bit_inv(queue, aqm) &&
    869				    test_bit_inv(queue, ap_perms.aqm))
    870					rc = 1;
    871
    872	mutex_unlock(&ap_perms_mutex);
    873
    874	return rc;
    875}
    876EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
    877
    878static int ap_device_probe(struct device *dev)
    879{
    880	struct ap_device *ap_dev = to_ap_dev(dev);
    881	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
    882	int card, queue, devres, drvres, rc = -ENODEV;
    883
    884	if (!get_device(dev))
    885		return rc;
    886
    887	if (is_queue_dev(dev)) {
    888		/*
    889		 * If the apqn is marked as reserved/used by ap bus and
    890		 * default drivers, only probe with drivers with the default
    891		 * flag set. If it is not marked, only probe with drivers
    892		 * with the default flag not set.
    893		 */
    894		card = AP_QID_CARD(to_ap_queue(dev)->qid);
    895		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
    896		mutex_lock(&ap_perms_mutex);
    897		devres = test_bit_inv(card, ap_perms.apm) &&
    898			test_bit_inv(queue, ap_perms.aqm);
    899		mutex_unlock(&ap_perms_mutex);
    900		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
    901		if (!!devres != !!drvres)
    902			goto out;
    903	}
    904
    905	/* Add queue/card to list of active queues/cards */
    906	spin_lock_bh(&ap_queues_lock);
    907	if (is_queue_dev(dev))
    908		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
    909			 to_ap_queue(dev)->qid);
    910	spin_unlock_bh(&ap_queues_lock);
    911
    912	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
    913
    914	if (rc) {
    915		spin_lock_bh(&ap_queues_lock);
    916		if (is_queue_dev(dev))
    917			hash_del(&to_ap_queue(dev)->hnode);
    918		spin_unlock_bh(&ap_queues_lock);
    919	} else {
    920		ap_check_bindings_complete();
    921	}
    922
    923out:
    924	if (rc)
    925		put_device(dev);
    926	return rc;
    927}
    928
    929static void ap_device_remove(struct device *dev)
    930{
    931	struct ap_device *ap_dev = to_ap_dev(dev);
    932	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
    933
    934	/* prepare ap queue device removal */
    935	if (is_queue_dev(dev))
    936		ap_queue_prepare_remove(to_ap_queue(dev));
    937
    938	/* driver's chance to clean up gracefully */
    939	if (ap_drv->remove)
    940		ap_drv->remove(ap_dev);
    941
    942	/* now do the ap queue device remove */
    943	if (is_queue_dev(dev))
    944		ap_queue_remove(to_ap_queue(dev));
    945
    946	/* Remove queue/card from list of active queues/cards */
    947	spin_lock_bh(&ap_queues_lock);
    948	if (is_queue_dev(dev))
    949		hash_del(&to_ap_queue(dev)->hnode);
    950	spin_unlock_bh(&ap_queues_lock);
    951
    952	put_device(dev);
    953}
    954
    955struct ap_queue *ap_get_qdev(ap_qid_t qid)
    956{
    957	int bkt;
    958	struct ap_queue *aq;
    959
    960	spin_lock_bh(&ap_queues_lock);
    961	hash_for_each(ap_queues, bkt, aq, hnode) {
    962		if (aq->qid == qid) {
    963			get_device(&aq->ap_dev.device);
    964			spin_unlock_bh(&ap_queues_lock);
    965			return aq;
    966		}
    967	}
    968	spin_unlock_bh(&ap_queues_lock);
    969
    970	return NULL;
    971}
    972EXPORT_SYMBOL(ap_get_qdev);
    973
    974int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
    975		       char *name)
    976{
    977	struct device_driver *drv = &ap_drv->driver;
    978
    979	drv->bus = &ap_bus_type;
    980	drv->owner = owner;
    981	drv->name = name;
    982	return driver_register(drv);
    983}
    984EXPORT_SYMBOL(ap_driver_register);
    985
    986void ap_driver_unregister(struct ap_driver *ap_drv)
    987{
    988	driver_unregister(&ap_drv->driver);
    989}
    990EXPORT_SYMBOL(ap_driver_unregister);
    991
    992void ap_bus_force_rescan(void)
    993{
     994	/* queue a bus rescan and wait for it to finish */
    995	del_timer(&ap_config_timer);
    996	queue_work(system_long_wq, &ap_scan_work);
    997	flush_work(&ap_scan_work);
    998}
    999EXPORT_SYMBOL(ap_bus_force_rescan);
   1000
   1001/*
   1002 * A config change has happened, force an ap bus rescan.
   1003 */
   1004void ap_bus_cfg_chg(void)
   1005{
   1006	AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
   1007
   1008	ap_bus_force_rescan();
   1009}
   1010
   1011/*
   1012 * hex2bitmap() - parse hex mask string and set bitmap.
    1013 * Valid strings are hex strings like "0x012345678" with at least one
    1014 * valid hex digit. The rest of the bitmap to the right is padded with 0.
    1015 * No spaces are allowed within the string; the leading 0x may be omitted.
   1016 * Returns the bitmask with exactly the bits set as given by the hex
   1017 * string (both in big endian order).
   1018 */
   1019static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
   1020{
   1021	int i, n, b;
   1022
   1023	/* bits needs to be a multiple of 8 */
   1024	if (bits & 0x07)
   1025		return -EINVAL;
   1026
   1027	if (str[0] == '0' && str[1] == 'x')
   1028		str++;
   1029	if (*str == 'x')
   1030		str++;
   1031
   1032	for (i = 0; isxdigit(*str) && i < bits; str++) {
   1033		b = hex_to_bin(*str);
   1034		for (n = 0; n < 4; n++)
   1035			if (b & (0x08 >> n))
   1036				set_bit_inv(i + n, bitmap);
   1037		i += 4;
   1038	}
   1039
   1040	if (*str == '\n')
   1041		str++;
   1042	if (*str)
   1043		return -EINVAL;
   1044	return 0;
   1045}
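/*
 * Editor's worked example (not part of the original source): for the
 * string "0x8004" the digits 8, 0, 0 and 4 are consumed left to right;
 * digit '8' (binary 1000) sets bit 0 and digit '4' (binary 0100) sets
 * bit 12 + 1 = 13, both counted MSB-first via set_bit_inv(). All other
 * bits up to bits - 1 remain cleared.
 */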
   1046
   1047/*
   1048 * modify_bitmap() - parse bitmask argument and modify an existing
   1049 * bit mask accordingly. A concatenation (done with ',') of these
   1050 * terms is recognized:
   1051 *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
   1052 * <bitnr> may be any valid number (hex, decimal or octal) in the range
   1053 * 0...bits-1; the leading + or - is required. Here are some examples:
   1054 *   +0-15,+32,-128,-0xFF
   1055 *   -0-255,+1-16,+0x128
   1056 *   +1,+2,+3,+4,-5,-7-10
    1057 * The bitmap is updated in place with all changes applied. Every
   1058 * positive value in the string will set a bit and every negative value
   1059 * in the string will clear a bit. As a bit may be touched more than once,
   1060 * the last 'operation' wins:
   1061 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
   1062 * cleared again. All other bits are unmodified.
   1063 */
   1064static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
   1065{
   1066	int a, i, z;
   1067	char *np, sign;
   1068
   1069	/* bits needs to be a multiple of 8 */
   1070	if (bits & 0x07)
   1071		return -EINVAL;
   1072
   1073	while (*str) {
   1074		sign = *str++;
   1075		if (sign != '+' && sign != '-')
   1076			return -EINVAL;
   1077		a = z = simple_strtoul(str, &np, 0);
   1078		if (str == np || a >= bits)
   1079			return -EINVAL;
   1080		str = np;
   1081		if (*str == '-') {
   1082			z = simple_strtoul(++str, &np, 0);
   1083			if (str == np || a > z || z >= bits)
   1084				return -EINVAL;
   1085			str = np;
   1086		}
   1087		for (i = a; i <= z; i++)
   1088			if (sign == '+')
   1089				set_bit_inv(i, bitmap);
   1090			else
   1091				clear_bit_inv(i, bitmap);
   1092		while (*str == ',' || *str == '\n')
   1093			str++;
   1094	}
   1095
   1096	return 0;
   1097}
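/*
 * Editor's worked example (not part of the original source): applied to
 * an all-zero bitmap, the string "+0-7,-4" first sets bits 0-7 and then
 * clears bit 4 again, leaving bits 0-3 and 5-7 set; the last operation
 * on a bit always wins.
 */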
   1098
   1099static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
   1100			       unsigned long *newmap)
   1101{
   1102	unsigned long size;
   1103	int rc;
   1104
   1105	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
   1106	if (*str == '+' || *str == '-') {
   1107		memcpy(newmap, bitmap, size);
   1108		rc = modify_bitmap(str, newmap, bits);
   1109	} else {
   1110		memset(newmap, 0, size);
   1111		rc = hex2bitmap(str, newmap, bits);
   1112	}
   1113	return rc;
   1114}
   1115
   1116int ap_parse_mask_str(const char *str,
   1117		      unsigned long *bitmap, int bits,
   1118		      struct mutex *lock)
   1119{
   1120	unsigned long *newmap, size;
   1121	int rc;
   1122
   1123	/* bits needs to be a multiple of 8 */
   1124	if (bits & 0x07)
   1125		return -EINVAL;
   1126
   1127	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
   1128	newmap = kmalloc(size, GFP_KERNEL);
   1129	if (!newmap)
   1130		return -ENOMEM;
   1131	if (mutex_lock_interruptible(lock)) {
   1132		kfree(newmap);
   1133		return -ERESTARTSYS;
   1134	}
   1135	rc = ap_parse_bitmap_str(str, bitmap, bits, newmap);
   1136	if (rc == 0)
   1137		memcpy(bitmap, newmap, size);
   1138	mutex_unlock(lock);
   1139	kfree(newmap);
   1140	return rc;
   1141}
   1142EXPORT_SYMBOL(ap_parse_mask_str);
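/*
 * Editor's usage sketch (not part of the original source): an external
 * user with its own bitmap and a hypothetical mutex 'my_lock' serializing
 * updates could apply a mask string like this:
 *
 *	DECLARE_BITMAP(mymask, AP_DEVICES);
 *
 *	bitmap_zero(mymask, AP_DEVICES);
 *	rc = ap_parse_mask_str("+0x40-0x4f", mymask, AP_DEVICES, &my_lock);
 *
 * On success bits 0x40-0x4f are set in mymask; if the lock cannot be
 * taken, -ERESTARTSYS is returned and mymask is left untouched.
 */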
   1143
   1144/*
   1145 * AP bus attributes.
   1146 */
   1147
   1148static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
   1149{
   1150	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
   1151}
   1152
   1153static ssize_t ap_domain_store(struct bus_type *bus,
   1154			       const char *buf, size_t count)
   1155{
   1156	int domain;
   1157
   1158	if (sscanf(buf, "%i\n", &domain) != 1 ||
   1159	    domain < 0 || domain > ap_max_domain_id ||
   1160	    !test_bit_inv(domain, ap_perms.aqm))
   1161		return -EINVAL;
   1162
   1163	spin_lock_bh(&ap_domain_lock);
   1164	ap_domain_index = domain;
   1165	spin_unlock_bh(&ap_domain_lock);
   1166
   1167	AP_DBF_INFO("%s stored new default domain=%d\n",
   1168		    __func__, domain);
   1169
   1170	return count;
   1171}
   1172
   1173static BUS_ATTR_RW(ap_domain);
   1174
   1175static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
   1176{
   1177	if (!ap_qci_info)	/* QCI not supported */
   1178		return scnprintf(buf, PAGE_SIZE, "not supported\n");
   1179
   1180	return scnprintf(buf, PAGE_SIZE,
   1181			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
   1182			 ap_qci_info->adm[0], ap_qci_info->adm[1],
   1183			 ap_qci_info->adm[2], ap_qci_info->adm[3],
   1184			 ap_qci_info->adm[4], ap_qci_info->adm[5],
   1185			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
   1186}
   1187
   1188static BUS_ATTR_RO(ap_control_domain_mask);
   1189
   1190static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
   1191{
   1192	if (!ap_qci_info)	/* QCI not supported */
   1193		return scnprintf(buf, PAGE_SIZE, "not supported\n");
   1194
   1195	return scnprintf(buf, PAGE_SIZE,
   1196			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
   1197			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
   1198			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
   1199			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
   1200			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
   1201}
   1202
   1203static BUS_ATTR_RO(ap_usage_domain_mask);
   1204
   1205static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
   1206{
   1207	if (!ap_qci_info)	/* QCI not supported */
   1208		return scnprintf(buf, PAGE_SIZE, "not supported\n");
   1209
   1210	return scnprintf(buf, PAGE_SIZE,
   1211			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
   1212			 ap_qci_info->apm[0], ap_qci_info->apm[1],
   1213			 ap_qci_info->apm[2], ap_qci_info->apm[3],
   1214			 ap_qci_info->apm[4], ap_qci_info->apm[5],
   1215			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
   1216}
   1217
   1218static BUS_ATTR_RO(ap_adapter_mask);
   1219
   1220static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
   1221{
   1222	return scnprintf(buf, PAGE_SIZE, "%d\n",
   1223			 ap_irq_flag ? 1 : 0);
   1224}
   1225
   1226static BUS_ATTR_RO(ap_interrupts);
   1227
   1228static ssize_t config_time_show(struct bus_type *bus, char *buf)
   1229{
   1230	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
   1231}
   1232
   1233static ssize_t config_time_store(struct bus_type *bus,
   1234				 const char *buf, size_t count)
   1235{
   1236	int time;
   1237
   1238	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
   1239		return -EINVAL;
   1240	ap_config_time = time;
   1241	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
   1242	return count;
   1243}
   1244
   1245static BUS_ATTR_RW(config_time);
   1246
   1247static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
   1248{
   1249	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
   1250}
   1251
   1252static ssize_t poll_thread_store(struct bus_type *bus,
   1253				 const char *buf, size_t count)
   1254{
   1255	int flag, rc;
   1256
   1257	if (sscanf(buf, "%d\n", &flag) != 1)
   1258		return -EINVAL;
   1259	if (flag) {
   1260		rc = ap_poll_thread_start();
   1261		if (rc)
   1262			count = rc;
   1263	} else {
   1264		ap_poll_thread_stop();
   1265	}
   1266	return count;
   1267}
   1268
   1269static BUS_ATTR_RW(poll_thread);
   1270
   1271static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
   1272{
   1273	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
   1274}
   1275
   1276static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
   1277				  size_t count)
   1278{
   1279	unsigned long long time;
   1280	ktime_t hr_time;
   1281
   1282	/* 120 seconds = maximum poll interval */
   1283	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
   1284	    time > 120000000000ULL)
   1285		return -EINVAL;
   1286	poll_timeout = time;
   1287	hr_time = poll_timeout;
   1288
   1289	spin_lock_bh(&ap_poll_timer_lock);
   1290	hrtimer_cancel(&ap_poll_timer);
   1291	hrtimer_set_expires(&ap_poll_timer, hr_time);
   1292	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
   1293	spin_unlock_bh(&ap_poll_timer_lock);
   1294
   1295	return count;
   1296}
   1297
   1298static BUS_ATTR_RW(poll_timeout);
   1299
   1300static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
   1301{
   1302	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
   1303}
   1304
   1305static BUS_ATTR_RO(ap_max_domain_id);
   1306
   1307static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
   1308{
   1309	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
   1310}
   1311
   1312static BUS_ATTR_RO(ap_max_adapter_id);
   1313
   1314static ssize_t apmask_show(struct bus_type *bus, char *buf)
   1315{
   1316	int rc;
   1317
   1318	if (mutex_lock_interruptible(&ap_perms_mutex))
   1319		return -ERESTARTSYS;
   1320	rc = scnprintf(buf, PAGE_SIZE,
   1321		       "0x%016lx%016lx%016lx%016lx\n",
   1322		       ap_perms.apm[0], ap_perms.apm[1],
   1323		       ap_perms.apm[2], ap_perms.apm[3]);
   1324	mutex_unlock(&ap_perms_mutex);
   1325
   1326	return rc;
   1327}
   1328
   1329static int __verify_card_reservations(struct device_driver *drv, void *data)
   1330{
   1331	int rc = 0;
   1332	struct ap_driver *ap_drv = to_ap_drv(drv);
   1333	unsigned long *newapm = (unsigned long *)data;
   1334
   1335	/*
   1336	 * increase the driver's module refcounter to be sure it is not
   1337	 * going away when we invoke the callback function.
   1338	 */
   1339	if (!try_module_get(drv->owner))
   1340		return 0;
   1341
   1342	if (ap_drv->in_use) {
   1343		rc = ap_drv->in_use(newapm, ap_perms.aqm);
   1344		if (rc)
   1345			rc = -EBUSY;
   1346	}
   1347
   1348	/* release the driver's module */
   1349	module_put(drv->owner);
   1350
   1351	return rc;
   1352}
   1353
   1354static int apmask_commit(unsigned long *newapm)
   1355{
   1356	int rc;
   1357	unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)];
   1358
   1359	/*
   1360	 * Check if any bits in the apmask have been set which will
   1361	 * result in queues being removed from non-default drivers
   1362	 */
   1363	if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) {
   1364		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
   1365				      __verify_card_reservations);
   1366		if (rc)
   1367			return rc;
   1368	}
   1369
   1370	memcpy(ap_perms.apm, newapm, APMASKSIZE);
   1371
   1372	return 0;
   1373}
   1374
   1375static ssize_t apmask_store(struct bus_type *bus, const char *buf,
   1376			    size_t count)
   1377{
   1378	int rc, changes = 0;
   1379	DECLARE_BITMAP(newapm, AP_DEVICES);
   1380
   1381	if (mutex_lock_interruptible(&ap_perms_mutex))
   1382		return -ERESTARTSYS;
   1383
   1384	rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
   1385	if (rc)
   1386		goto done;
   1387
   1388	changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
   1389	if (changes)
   1390		rc = apmask_commit(newapm);
   1391
   1392done:
   1393	mutex_unlock(&ap_perms_mutex);
   1394	if (rc)
   1395		return rc;
   1396
   1397	if (changes) {
   1398		ap_bus_revise_bindings();
   1399		ap_send_mask_changed_uevent(newapm, NULL);
   1400	}
   1401
   1402	return count;
   1403}
   1404
   1405static BUS_ATTR_RW(apmask);
   1406
   1407static ssize_t aqmask_show(struct bus_type *bus, char *buf)
   1408{
   1409	int rc;
   1410
   1411	if (mutex_lock_interruptible(&ap_perms_mutex))
   1412		return -ERESTARTSYS;
   1413	rc = scnprintf(buf, PAGE_SIZE,
   1414		       "0x%016lx%016lx%016lx%016lx\n",
   1415		       ap_perms.aqm[0], ap_perms.aqm[1],
   1416		       ap_perms.aqm[2], ap_perms.aqm[3]);
   1417	mutex_unlock(&ap_perms_mutex);
   1418
   1419	return rc;
   1420}
   1421
   1422static int __verify_queue_reservations(struct device_driver *drv, void *data)
   1423{
   1424	int rc = 0;
   1425	struct ap_driver *ap_drv = to_ap_drv(drv);
   1426	unsigned long *newaqm = (unsigned long *)data;
   1427
   1428	/*
   1429	 * increase the driver's module refcounter to be sure it is not
   1430	 * going away when we invoke the callback function.
   1431	 */
   1432	if (!try_module_get(drv->owner))
   1433		return 0;
   1434
   1435	if (ap_drv->in_use) {
   1436		rc = ap_drv->in_use(ap_perms.apm, newaqm);
   1437		if (rc)
    1439			rc = -EBUSY;
   1439	}
   1440
   1441	/* release the driver's module */
   1442	module_put(drv->owner);
   1443
   1444	return rc;
   1445}
   1446
   1447static int aqmask_commit(unsigned long *newaqm)
   1448{
   1449	int rc;
   1450	unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)];
   1451
   1452	/*
   1453	 * Check if any bits in the aqmask have been set which will
   1454	 * result in queues being removed from non-default drivers
   1455	 */
   1456	if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) {
   1457		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
   1458				      __verify_queue_reservations);
   1459		if (rc)
   1460			return rc;
   1461	}
   1462
   1463	memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);
   1464
   1465	return 0;
   1466}
   1467
   1468static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
   1469			    size_t count)
   1470{
   1471	int rc, changes = 0;
   1472	DECLARE_BITMAP(newaqm, AP_DOMAINS);
   1473
   1474	if (mutex_lock_interruptible(&ap_perms_mutex))
   1475		return -ERESTARTSYS;
   1476
   1477	rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
   1478	if (rc)
   1479		goto done;
   1480
    1481	changes = memcmp(ap_perms.aqm, newaqm, AQMASKSIZE);
   1482	if (changes)
   1483		rc = aqmask_commit(newaqm);
   1484
   1485done:
   1486	mutex_unlock(&ap_perms_mutex);
   1487	if (rc)
   1488		return rc;
   1489
   1490	if (changes) {
   1491		ap_bus_revise_bindings();
   1492		ap_send_mask_changed_uevent(NULL, newaqm);
   1493	}
   1494
   1495	return count;
   1496}
   1497
   1498static BUS_ATTR_RW(aqmask);
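/*
 * Editor's note (not part of the original source): since the bus is
 * registered under the name "ap", the writable mask attributes above are
 * visible as /sys/bus/ap/apmask and /sys/bus/ap/aqmask. Writes accept
 * either a full hex mask ("0x...") handled by hex2bitmap() or a relative
 * "+<bitnr>[-<bitnr>]" / "-<bitnr>[-<bitnr>]" list handled by
 * modify_bitmap(), both dispatched via ap_parse_bitmap_str() above.
 */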
   1499
   1500static ssize_t scans_show(struct bus_type *bus, char *buf)
   1501{
   1502	return scnprintf(buf, PAGE_SIZE, "%llu\n",
   1503			 atomic64_read(&ap_scan_bus_count));
   1504}
   1505
   1506static ssize_t scans_store(struct bus_type *bus, const char *buf,
   1507			   size_t count)
   1508{
   1509	AP_DBF_INFO("%s force AP bus rescan\n", __func__);
   1510
   1511	ap_bus_force_rescan();
   1512
   1513	return count;
   1514}
   1515
   1516static BUS_ATTR_RW(scans);
   1517
   1518static ssize_t bindings_show(struct bus_type *bus, char *buf)
   1519{
   1520	int rc;
   1521	unsigned int apqns, n;
   1522
   1523	ap_calc_bound_apqns(&apqns, &n);
   1524	if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
   1525		rc = scnprintf(buf, PAGE_SIZE, "%u/%u (complete)\n", n, apqns);
   1526	else
   1527		rc = scnprintf(buf, PAGE_SIZE, "%u/%u\n", n, apqns);
   1528
   1529	return rc;
   1530}
   1531
   1532static BUS_ATTR_RO(bindings);
   1533
   1534static struct attribute *ap_bus_attrs[] = {
   1535	&bus_attr_ap_domain.attr,
   1536	&bus_attr_ap_control_domain_mask.attr,
   1537	&bus_attr_ap_usage_domain_mask.attr,
   1538	&bus_attr_ap_adapter_mask.attr,
   1539	&bus_attr_config_time.attr,
   1540	&bus_attr_poll_thread.attr,
   1541	&bus_attr_ap_interrupts.attr,
   1542	&bus_attr_poll_timeout.attr,
   1543	&bus_attr_ap_max_domain_id.attr,
   1544	&bus_attr_ap_max_adapter_id.attr,
   1545	&bus_attr_apmask.attr,
   1546	&bus_attr_aqmask.attr,
   1547	&bus_attr_scans.attr,
   1548	&bus_attr_bindings.attr,
   1549	NULL,
   1550};
   1551ATTRIBUTE_GROUPS(ap_bus);
   1552
   1553static struct bus_type ap_bus_type = {
   1554	.name = "ap",
   1555	.bus_groups = ap_bus_groups,
   1556	.match = &ap_bus_match,
   1557	.uevent = &ap_uevent,
   1558	.probe = ap_device_probe,
   1559	.remove = ap_device_remove,
   1560};
   1561
   1562/**
    1563 * ap_select_domain(): Select an AP domain if possible and if we
    1564 * haven't already done so.
   1565 */
   1566static void ap_select_domain(void)
   1567{
   1568	struct ap_queue_status status;
   1569	int card, dom;
   1570
   1571	/*
   1572	 * Choose the default domain. Either the one specified with
   1573	 * the "domain=" parameter or the first domain with at least
   1574	 * one valid APQN.
   1575	 */
   1576	spin_lock_bh(&ap_domain_lock);
   1577	if (ap_domain_index >= 0) {
   1578		/* Domain has already been selected. */
   1579		goto out;
   1580	}
   1581	for (dom = 0; dom <= ap_max_domain_id; dom++) {
   1582		if (!ap_test_config_usage_domain(dom) ||
   1583		    !test_bit_inv(dom, ap_perms.aqm))
   1584			continue;
   1585		for (card = 0; card <= ap_max_adapter_id; card++) {
   1586			if (!ap_test_config_card_id(card) ||
   1587			    !test_bit_inv(card, ap_perms.apm))
   1588				continue;
   1589			status = ap_test_queue(AP_MKQID(card, dom),
   1590					       ap_apft_available(),
   1591					       NULL);
   1592			if (status.response_code == AP_RESPONSE_NORMAL)
   1593				break;
   1594		}
   1595		if (card <= ap_max_adapter_id)
   1596			break;
   1597	}
   1598	if (dom <= ap_max_domain_id) {
   1599		ap_domain_index = dom;
   1600		AP_DBF_INFO("%s new default domain is %d\n",
   1601			    __func__, ap_domain_index);
   1602	}
   1603out:
   1604	spin_unlock_bh(&ap_domain_lock);
   1605}
   1606
   1607/*
   1608 * This function checks the type and returns either 0 for not
   1609 * supported or the highest compatible type value (which may
   1610 * include the input type value).
   1611 */
   1612static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
   1613{
   1614	int comp_type = 0;
   1615
   1616	/* < CEX2A is not supported */
   1617	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
   1618		AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
   1619			    __func__, AP_QID_CARD(qid),
   1620			    AP_QID_QUEUE(qid), rawtype);
   1621		return 0;
   1622	}
   1623	/* up to CEX8 known and fully supported */
   1624	if (rawtype <= AP_DEVICE_TYPE_CEX8)
   1625		return rawtype;
   1626	/*
   1627	 * unknown new type > CEX8, check for compatibility
   1628	 * to the highest known and supported type which is
   1629	 * currently CEX8 with the help of the QACT function.
   1630	 */
   1631	if (ap_qact_available()) {
   1632		struct ap_queue_status status;
   1633		union ap_qact_ap_info apinfo = {0};
   1634
   1635		apinfo.mode = (func >> 26) & 0x07;
   1636		apinfo.cat = AP_DEVICE_TYPE_CEX8;
   1637		status = ap_qact(qid, 0, &apinfo);
   1638		if (status.response_code == AP_RESPONSE_NORMAL &&
   1639		    apinfo.cat >= AP_DEVICE_TYPE_CEX2A &&
   1640		    apinfo.cat <= AP_DEVICE_TYPE_CEX8)
   1641			comp_type = apinfo.cat;
   1642	}
   1643	if (!comp_type)
   1644		AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
   1645			    __func__, AP_QID_CARD(qid),
   1646			    AP_QID_QUEUE(qid), rawtype);
   1647	else if (comp_type != rawtype)
   1648		AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
   1649			    __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
   1650			    rawtype, comp_type);
   1651	return comp_type;
   1652}
   1653
   1654/*
    1655 * Helper function to be used with bus_find_device();
    1656 * matches the card device with the given id
   1657 */
   1658static int __match_card_device_with_id(struct device *dev, const void *data)
   1659{
   1660	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
   1661}
   1662
   1663/*
    1664 * Helper function to be used with bus_find_device();
    1665 * matches the queue device with a given qid
   1666 */
   1667static int __match_queue_device_with_qid(struct device *dev, const void *data)
   1668{
   1669	return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
   1670}
   1671
   1672/*
    1673 * Helper function to be used with bus_find_device();
    1674 * matches any queue device with the given queue id
   1675 */
   1676static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
   1677{
   1678	return is_queue_dev(dev) &&
   1679		AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
   1680}
   1681
   1682/* Helper function for notify_config_changed */
   1683static int __drv_notify_config_changed(struct device_driver *drv, void *data)
   1684{
   1685	struct ap_driver *ap_drv = to_ap_drv(drv);
   1686
   1687	if (try_module_get(drv->owner)) {
   1688		if (ap_drv->on_config_changed)
   1689			ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old);
   1690		module_put(drv->owner);
   1691	}
   1692
   1693	return 0;
   1694}
   1695
    1696/* Notify all drivers about a qci config change */
   1697static inline void notify_config_changed(void)
   1698{
   1699	bus_for_each_drv(&ap_bus_type, NULL, NULL,
   1700			 __drv_notify_config_changed);
   1701}
   1702
   1703/* Helper function for notify_scan_complete */
   1704static int __drv_notify_scan_complete(struct device_driver *drv, void *data)
   1705{
   1706	struct ap_driver *ap_drv = to_ap_drv(drv);
   1707
   1708	if (try_module_get(drv->owner)) {
   1709		if (ap_drv->on_scan_complete)
   1710			ap_drv->on_scan_complete(ap_qci_info,
   1711						 ap_qci_info_old);
   1712		module_put(drv->owner);
   1713	}
   1714
   1715	return 0;
   1716}
   1717
   1718/* Notify all drivers about bus scan complete */
   1719static inline void notify_scan_complete(void)
   1720{
   1721	bus_for_each_drv(&ap_bus_type, NULL, NULL,
   1722			 __drv_notify_scan_complete);
   1723}
   1724
   1725/*
   1726 * Helper function for ap_scan_bus().
   1727 * Remove card device and associated queue devices.
   1728 */
   1729static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
   1730{
   1731	bus_for_each_dev(&ap_bus_type, NULL,
   1732			 (void *)(long)ac->id,
   1733			 __ap_queue_devices_with_id_unregister);
   1734	device_unregister(&ac->ap_dev.device);
   1735}
   1736
   1737/*
   1738 * Helper function for ap_scan_bus().
   1739 * Does the scan bus job for all the domains within
   1740 * a valid adapter given by an ap_card ptr.
   1741 */
   1742static inline void ap_scan_domains(struct ap_card *ac)
   1743{
   1744	bool decfg, chkstop;
   1745	ap_qid_t qid;
   1746	unsigned int func;
   1747	struct device *dev;
   1748	struct ap_queue *aq;
   1749	int rc, dom, depth, type, ml;
   1750
   1751	/*
   1752	 * Go through the configuration for the domains and compare them
   1753	 * to the existing queue devices. Also take care of the config
   1754	 * and error state for the queue devices.
   1755	 */
   1756
   1757	for (dom = 0; dom <= ap_max_domain_id; dom++) {
   1758		qid = AP_MKQID(ac->id, dom);
   1759		dev = bus_find_device(&ap_bus_type, NULL,
   1760				      (void *)(long)qid,
   1761				      __match_queue_device_with_qid);
   1762		aq = dev ? to_ap_queue(dev) : NULL;
   1763		if (!ap_test_config_usage_domain(dom)) {
   1764			if (dev) {
   1765				AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
   1766					    __func__, ac->id, dom);
   1767				device_unregister(dev);
   1768				put_device(dev);
   1769			}
   1770			continue;
   1771		}
   1772		/* domain is valid, get info from this APQN */
   1773		if (!ap_queue_info(qid, &type, &func, &depth,
   1774				   &ml, &decfg, &chkstop)) {
   1775			if (aq) {
   1776				AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
   1777					    __func__, ac->id, dom);
   1778				device_unregister(dev);
   1779				put_device(dev);
   1780			}
   1781			continue;
   1782		}
   1783		/* if no queue device exists, create a new one */
   1784		if (!aq) {
   1785			aq = ap_queue_create(qid, ac->ap_dev.device_type);
   1786			if (!aq) {
   1787				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
   1788					    __func__, ac->id, dom);
   1789				continue;
   1790			}
   1791			aq->card = ac;
   1792			aq->config = !decfg;
   1793			aq->chkstop = chkstop;
   1794			dev = &aq->ap_dev.device;
   1795			dev->bus = &ap_bus_type;
   1796			dev->parent = &ac->ap_dev.device;
   1797			dev_set_name(dev, "%02x.%04x", ac->id, dom);
   1798			/* register queue device */
   1799			rc = device_register(dev);
   1800			if (rc) {
   1801				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
   1802					    __func__, ac->id, dom);
   1803				goto put_dev_and_continue;
   1804			}
   1805			/* get it and thus adjust reference counter */
   1806			get_device(dev);
   1807			if (decfg)
   1808				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
   1809					    __func__, ac->id, dom);
   1810			else if (chkstop)
   1811				AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
   1812					    __func__, ac->id, dom);
   1813			else
   1814				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
   1815					    __func__, ac->id, dom);
   1816			goto put_dev_and_continue;
   1817		}
   1818		/* handle state changes on already existing queue device */
   1819		spin_lock_bh(&aq->lock);
   1820		/* checkstop state */
   1821		if (chkstop && !aq->chkstop) {
   1822			/* checkstop on */
   1823			aq->chkstop = true;
   1824			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
   1825				aq->dev_state = AP_DEV_STATE_ERROR;
   1826				aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
   1827			}
   1828			spin_unlock_bh(&aq->lock);
   1829			AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
   1830				   __func__, ac->id, dom);
   1831			/* 'receive' pending messages with -EAGAIN */
   1832			ap_flush_queue(aq);
   1833			goto put_dev_and_continue;
   1834		} else if (!chkstop && aq->chkstop) {
   1835			/* checkstop off */
   1836			aq->chkstop = false;
   1837			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
   1838				aq->dev_state = AP_DEV_STATE_OPERATING;
   1839				aq->sm_state = AP_SM_STATE_RESET_START;
   1840			}
   1841			spin_unlock_bh(&aq->lock);
   1842			AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
   1843				   __func__, ac->id, dom);
   1844			goto put_dev_and_continue;
   1845		}
   1846		/* config state change */
   1847		if (decfg && aq->config) {
   1848			/* config off this queue device */
   1849			aq->config = false;
   1850			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
   1851				aq->dev_state = AP_DEV_STATE_ERROR;
   1852				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
   1853			}
   1854			spin_unlock_bh(&aq->lock);
   1855			AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
   1856				   __func__, ac->id, dom);
   1857			ap_send_config_uevent(&aq->ap_dev, aq->config);
   1858			/* 'receive' pending messages with -EAGAIN */
   1859			ap_flush_queue(aq);
   1860			goto put_dev_and_continue;
   1861		} else if (!decfg && !aq->config) {
   1862			/* config on this queue device */
   1863			aq->config = true;
   1864			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
   1865				aq->dev_state = AP_DEV_STATE_OPERATING;
   1866				aq->sm_state = AP_SM_STATE_RESET_START;
   1867			}
   1868			spin_unlock_bh(&aq->lock);
   1869			AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
   1870				   __func__, ac->id, dom);
   1871			ap_send_config_uevent(&aq->ap_dev, aq->config);
   1872			goto put_dev_and_continue;
   1873		}
   1874		/* handle other error states */
   1875		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
   1876			spin_unlock_bh(&aq->lock);
   1877			/* 'receive' pending messages with -EAGAIN */
   1878			ap_flush_queue(aq);
   1879			/* re-init (with reset) the queue device */
   1880			ap_queue_init_state(aq);
   1881			AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
   1882				    __func__, ac->id, dom);
   1883			goto put_dev_and_continue;
   1884		}
   1885		spin_unlock_bh(&aq->lock);
   1886put_dev_and_continue:
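        		/* drop the reference taken by bus_find_device() or get_device() above */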
   1887		put_device(dev);
   1888	}
   1889}
   1890
   1891/*
   1892 * Helper function for ap_scan_bus().
    1893 * Does the bus scan job for the given adapter id.
   1894 */
   1895static inline void ap_scan_adapter(int ap)
   1896{
   1897	bool decfg, chkstop;
   1898	ap_qid_t qid;
   1899	unsigned int func;
   1900	struct device *dev;
   1901	struct ap_card *ac;
   1902	int rc, dom, depth, type, comp_type, ml;
   1903
    1904	/* Is there currently a card device for this adapter? */
   1905	dev = bus_find_device(&ap_bus_type, NULL,
   1906			      (void *)(long)ap,
   1907			      __match_card_device_with_id);
   1908	ac = dev ? to_ap_card(dev) : NULL;
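        	/* if found, bus_find_device() took a reference on the card device */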
   1909
   1910	/* Adapter not in configuration ? */
   1911	if (!ap_test_config_card_id(ap)) {
   1912		if (ac) {
   1913			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
   1914				    __func__, ap);
   1915			ap_scan_rm_card_dev_and_queue_devs(ac);
   1916			put_device(dev);
   1917		}
   1918		return;
   1919	}
   1920
   1921	/*
    1922	 * Adapter ap is valid in the current configuration. So do some checks:
    1923	 * If no card device exists, build one. If a card device exists, check
    1924	 * whether the type or functions have changed. For all this we need to
    1925	 * find a valid APQN first.
   1926	 */
   1927
   1928	for (dom = 0; dom <= ap_max_domain_id; dom++)
   1929		if (ap_test_config_usage_domain(dom)) {
   1930			qid = AP_MKQID(ap, dom);
   1931			if (ap_queue_info(qid, &type, &func, &depth,
   1932					  &ml, &decfg, &chkstop))
   1933				break;
   1934		}
   1935	if (dom > ap_max_domain_id) {
   1936		/* Could not find a valid APQN for this adapter */
   1937		if (ac) {
   1938			AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
   1939				    __func__, ap);
   1940			ap_scan_rm_card_dev_and_queue_devs(ac);
   1941			put_device(dev);
   1942		} else {
   1943			AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
   1944				   __func__, ap);
   1945		}
   1946		return;
   1947	}
   1948	if (!type) {
    1949		/* No adapter type info available, an unusable adapter */
   1950		if (ac) {
   1951			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
   1952				    __func__, ap);
   1953			ap_scan_rm_card_dev_and_queue_devs(ac);
   1954			put_device(dev);
   1955		} else {
   1956			AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
   1957				   __func__, ap);
   1958		}
   1959		return;
   1960	}
   1961
   1962	if (ac) {
   1963		/* Check APQN against existing card device for changes */
   1964		if (ac->raw_hwtype != type) {
   1965			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
   1966				    __func__, ap, type);
   1967			ap_scan_rm_card_dev_and_queue_devs(ac);
   1968			put_device(dev);
   1969			ac = NULL;
   1970		} else if (ac->functions != func) {
   1971			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
    1972				    __func__, ap, func);
   1973			ap_scan_rm_card_dev_and_queue_devs(ac);
   1974			put_device(dev);
   1975			ac = NULL;
   1976		} else {
   1977			/* handle checkstop state change */
   1978			if (chkstop && !ac->chkstop) {
   1979				/* checkstop on */
   1980				ac->chkstop = true;
   1981				AP_DBF_INFO("%s(%d) card dev checkstop on\n",
   1982					    __func__, ap);
   1983			} else if (!chkstop && ac->chkstop) {
   1984				/* checkstop off */
   1985				ac->chkstop = false;
   1986				AP_DBF_INFO("%s(%d) card dev checkstop off\n",
   1987					    __func__, ap);
   1988			}
   1989			/* handle config state change */
   1990			if (decfg && ac->config) {
   1991				ac->config = false;
   1992				AP_DBF_INFO("%s(%d) card dev config off\n",
   1993					    __func__, ap);
   1994				ap_send_config_uevent(&ac->ap_dev, ac->config);
   1995			} else if (!decfg && !ac->config) {
   1996				ac->config = true;
   1997				AP_DBF_INFO("%s(%d) card dev config on\n",
   1998					    __func__, ap);
   1999				ap_send_config_uevent(&ac->ap_dev, ac->config);
   2000			}
   2001		}
   2002	}
   2003
   2004	if (!ac) {
   2005		/* Build a new card device */
   2006		comp_type = ap_get_compatible_type(qid, type, func);
   2007		if (!comp_type) {
   2008			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
   2009				    __func__, ap, type);
   2010			return;
   2011		}
   2012		ac = ap_card_create(ap, depth, type, comp_type, func, ml);
   2013		if (!ac) {
   2014			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
   2015				    __func__, ap);
   2016			return;
   2017		}
   2018		ac->config = !decfg;
   2019		ac->chkstop = chkstop;
   2020		dev = &ac->ap_dev.device;
   2021		dev->bus = &ap_bus_type;
   2022		dev->parent = ap_root_device;
   2023		dev_set_name(dev, "card%02x", ap);
   2024		/* maybe enlarge ap_max_msg_size to support this card */
   2025		if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
   2026			atomic_set(&ap_max_msg_size, ac->maxmsgsize);
    2027			AP_DBF_INFO("%s(%d) ap_max_msg_size updated to %d bytes\n",
   2028				    __func__, ap,
   2029				    atomic_read(&ap_max_msg_size));
   2030		}
   2031		/* Register the new card device with AP bus */
   2032		rc = device_register(dev);
   2033		if (rc) {
   2034			AP_DBF_WARN("%s(%d) device_register() failed\n",
   2035				    __func__, ap);
   2036			put_device(dev);
   2037			return;
   2038		}
   2039		/* get it and thus adjust reference counter */
   2040		get_device(dev);
   2041		if (decfg)
   2042			AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
   2043				    __func__, ap, type, func);
   2044		else if (chkstop)
   2045			AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
   2046				    __func__, ap, type, func);
   2047		else
   2048			AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
   2049				    __func__, ap, type, func);
   2050	}
   2051
   2052	/* Verify the domains and the queue devices for this card */
   2053	ap_scan_domains(ac);
   2054
   2055	/* release the card device */
   2056	put_device(&ac->ap_dev.device);
   2057}
   2058
   2059/**
   2060 * ap_get_configuration - get the host AP configuration
   2061 *
   2062 * Stores the host AP configuration information returned from the previous call
   2063 * to Query Configuration Information (QCI), then retrieves and stores the
   2064 * current AP configuration returned from QCI.
   2065 *
   2066 * Return: true if the host AP configuration changed between calls to QCI;
   2067 * otherwise, return false.
   2068 */
   2069static bool ap_get_configuration(void)
   2070{
   2071	memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
   2072	ap_fetch_qci_info(ap_qci_info);
   2073
   2074	return memcmp(ap_qci_info, ap_qci_info_old,
   2075		      sizeof(struct ap_config_info)) != 0;
   2076}
   2077
   2078/**
   2079 * ap_scan_bus(): Scan the AP bus for new devices
    2080 * Runs periodically, triggered by the ap_config_timer every ap_config_time seconds
   2081 * @unused: Unused pointer.
   2082 */
   2083static void ap_scan_bus(struct work_struct *unused)
   2084{
   2085	int ap, config_changed = 0;
   2086
   2087	/* config change notify */
   2088	config_changed = ap_get_configuration();
   2089	if (config_changed)
   2090		notify_config_changed();
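        	/* autodetect a usable default domain if none has been set yet */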
   2091	ap_select_domain();
   2092
   2093	AP_DBF_DBG("%s running\n", __func__);
   2094
   2095	/* loop over all possible adapters */
   2096	for (ap = 0; ap <= ap_max_adapter_id; ap++)
   2097		ap_scan_adapter(ap);
   2098
   2099	/* scan complete notify */
   2100	if (config_changed)
   2101		notify_scan_complete();
   2102
   2103	/* check if there is at least one queue available with default domain */
   2104	if (ap_domain_index >= 0) {
   2105		struct device *dev =
   2106			bus_find_device(&ap_bus_type, NULL,
   2107					(void *)(long)ap_domain_index,
   2108					__match_queue_device_with_queue_id);
   2109		if (dev)
   2110			put_device(dev);
   2111		else
   2112			AP_DBF_INFO("%s no queue device with default domain %d available\n",
   2113				    __func__, ap_domain_index);
   2114	}
   2115
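        	/* the very first bus scan after module init is announced to user space */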
   2116	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
   2117		AP_DBF_DBG("%s init scan complete\n", __func__);
   2118		ap_send_init_scan_done_uevent();
   2119		ap_check_bindings_complete();
   2120	}
   2121
   2122	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
   2123}
   2124
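        /* AP bus rescan timer callback: defer the actual bus scan to a workqueue */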
   2125static void ap_config_timeout(struct timer_list *unused)
   2126{
   2127	queue_work(system_long_wq, &ap_scan_work);
   2128}
   2129
   2130static int __init ap_debug_init(void)
   2131{
   2132	ap_dbf_info = debug_register("ap", 2, 1,
   2133				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
   2134	debug_register_view(ap_dbf_info, &debug_sprintf_view);
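        	/*
        	 * Log only errors by default; the level can be raised at runtime
        	 * via the s390dbf debugfs interface.
        	 */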
   2135	debug_set_level(ap_dbf_info, DBF_ERR);
   2136
   2137	return 0;
   2138}
   2139
   2140static void __init ap_perms_init(void)
   2141{
   2142	/* all resources usable if no kernel parameter string given */
   2143	memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
   2144	memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
   2145	memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
   2146
   2147	/* apm kernel parameter string */
   2148	if (apm_str) {
   2149		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
   2150		ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
   2151				  &ap_perms_mutex);
   2152	}
   2153
   2154	/* aqm kernel parameter string */
   2155	if (aqm_str) {
   2156		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
   2157		ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
   2158				  &ap_perms_mutex);
   2159	}
   2160}
   2161
   2162/**
   2163 * ap_module_init(): The module initialization code.
   2164 *
   2165 * Initializes the module.
   2166 */
   2167static int __init ap_module_init(void)
   2168{
   2169	int rc;
   2170
   2171	rc = ap_debug_init();
   2172	if (rc)
   2173		return rc;
   2174
   2175	if (!ap_instructions_available()) {
   2176		pr_warn("The hardware system does not support AP instructions\n");
   2177		return -ENODEV;
   2178	}
   2179
   2180	/* init ap_queue hashtable */
   2181	hash_init(ap_queues);
   2182
   2183	/* set up the AP permissions (ioctls, ap and aq masks) */
   2184	ap_perms_init();
   2185
   2186	/* Get AP configuration data if available */
   2187	ap_init_qci_info();
   2188
   2189	/* check default domain setting */
   2190	if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
   2191	    (ap_domain_index >= 0 &&
   2192	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
   2193		pr_warn("%d is not a valid cryptographic domain\n",
   2194			ap_domain_index);
   2195		ap_domain_index = -1;
   2196	}
   2197
   2198	/* enable interrupts if available */
   2199	if (ap_interrupts_available() && ap_useirq) {
   2200		rc = register_adapter_interrupt(&ap_airq);
   2201		ap_irq_flag = (rc == 0);
   2202	}
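        	/* without interrupt support the AP bus falls back to polling */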
   2203
   2204	/* Create /sys/bus/ap. */
   2205	rc = bus_register(&ap_bus_type);
   2206	if (rc)
   2207		goto out;
   2208
   2209	/* Create /sys/devices/ap. */
   2210	ap_root_device = root_device_register("ap");
   2211	rc = PTR_ERR_OR_ZERO(ap_root_device);
   2212	if (rc)
   2213		goto out_bus;
   2214	ap_root_device->bus = &ap_bus_type;
   2215
    2216	/* Set up the AP bus rescan timer. */
   2217	timer_setup(&ap_config_timer, ap_config_timeout, 0);
   2218
   2219	/*
    2220	 * Set up the high resolution poll timer.
    2221	 * If we are running under z/VM, adjust polling to the z/VM polling rate.
   2222	 */
   2223	if (MACHINE_IS_VM)
   2224		poll_timeout = 1500000;
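        		/* 1500000 ns = 1.5 ms (poll_timeout is specified in nanoseconds) */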
   2225	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
   2226	ap_poll_timer.function = ap_poll_timeout;
   2227
   2228	/* Start the low priority AP bus poll thread. */
   2229	if (ap_thread_flag) {
   2230		rc = ap_poll_thread_start();
   2231		if (rc)
   2232			goto out_work;
   2233	}
   2234
   2235	queue_work(system_long_wq, &ap_scan_work);
   2236
   2237	return 0;
   2238
   2239out_work:
   2240	hrtimer_cancel(&ap_poll_timer);
   2241	root_device_unregister(ap_root_device);
   2242out_bus:
   2243	bus_unregister(&ap_bus_type);
   2244out:
   2245	if (ap_irq_flag)
   2246		unregister_adapter_interrupt(&ap_airq);
   2247	kfree(ap_qci_info);
   2248	return rc;
   2249}
   2250device_initcall(ap_module_init);