cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sclp_cmd.c (14699B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
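
/*
 * Added illustrative sketch (not in the original file): the calling
 * convention the helpers above expect. An SCCB must reside below 2GB
 * (hence GFP_DMA), starts with a length-initialized struct sccb_header,
 * and reports the outcome in header.response_code. The function name is
 * hypothetical; the expected response code depends on the command.
 */
#if 0
static int example_simple_command(sclp_cmdw_t cmd)
{
	struct sccb_header *sccb;
	int rc;

	/* A single zeroed DMA page is the common SCCB size. */
	sccb = (struct sccb_header *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->length = PAGE_SIZE;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	/* 0x0020 ("complete") is the usual success response. */
	if (sccb->response_code != 0x0020)
		rc = -EIO;
out:
	free_page((unsigned long) sccb);
	return rc;
}
#endif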

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
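
/*
 * Added note: an SCLP command word encodes its target in the modifier byte
 * at bits 8-15, so configuring core 2, for example, issues
 * 0x00110001 | (2 << 8) == 0x00110201. The same pattern recurs below for
 * storage IDs and channel-path IDs.
 */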

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}
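
/*
 * Added note: increment numbers (rn) are 1-based and each increment spans
 * sclp.rzm bytes, so with rzm == 256MB, rn == 3 maps to the region starting
 * at (3 - 1) * 256MB == 512MB.
 */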

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;
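
/*
 * Added note: each 32-bit entry carries the increment number in its upper
 * halfword, which is why callers extract it as "entries[i] >> 16".
 */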

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}
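
/*
 * Added note: on the offline path the result of sclp_unassign_storage() is
 * deliberately not accumulated into rc; the notifier below calls this for
 * MEM_OFFLINE and MEM_CANCEL_ONLINE, where a failure can no longer be
 * acted upon.
 */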

static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * We do not allow memory blocks that contain standby memory
		 * to be set offline. This is done to simplify the
		 * "memory online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}
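
/*
 * Added worked example for align_to_block_size(): with a 256MB block size,
 * start = 0x1f000000 and size = 0x30000000 become start_align = 0x20000000
 * and size_align = 0x20000000; the partial blocks at both ends are dropped.
 */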

static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size, MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (oldmem_data.start)	/* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform configure channel-path sclp command for the specified chpid and
 * wait for completion. Return 0 if the command finished successfully,
 * non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. On success return 0. Return non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}