cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hyperv_vmbus.h (12624B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

/*
 * Timeout for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout for guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55


/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
        u64 as_uint64;
        struct {
                u32 pending;
                u32 armed;
        };
};

struct hv_monitor_parameter {
        union hv_connection_id connectionid;
        u16 flagnumber;
        u16 rsvdz;
};

union hv_monitor_trigger_state {
        u32 asu32;

        struct {
                u32 group_enable:4;
                u32 rsvdz:28;
        };
};

/* struct hv_monitor_page Layout */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvd3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
        union hv_monitor_trigger_state trigger_state;
        u32 rsvdz1;

        union hv_monitor_trigger_group trigger_group[4];
        u64 rsvdz2[3];

        s32 next_checktime[4][32];

        u16 latency[4][32];
        u64 rsvdz3[32];

        struct hv_monitor_parameter parameter[4][32];

        u8 rsvdz4[1984];
};
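
/*
 * Illustrative addition, not part of the original header: the offsets in
 * the layout table above can be cross-checked against the structure with
 * compile-time assertions.  This sketch assumes static_assert() (e.g. via
 * <linux/build_bug.h>) and offsetof() are visible here; it is kept under
 * "#if 0" because it is an example, not something the driver needs.
 */
#if 0 /* example only */
static_assert(offsetof(struct hv_monitor_page, trigger_group) == 0x8);
static_assert(offsetof(struct hv_monitor_page, next_checktime) == 0x40);
static_assert(offsetof(struct hv_monitor_page, latency) == 0x240);
static_assert(offsetof(struct hv_monitor_page, rsvdz3) == 0x340);
static_assert(offsetof(struct hv_monitor_page, parameter) == 0x440);
static_assert(offsetof(struct hv_monitor_page, rsvdz4) == 0x840);
static_assert(sizeof(struct hv_monitor_page) == 4096); /* one 4 KiB page */
#endif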

#define HV_HYPERCALL_PARAM_ALIGN	sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
        union hv_connection_id connectionid;
        u32 reserved;
        u32 message_type;
        u32 payload_size;
        u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};


enum {
        VMBUS_MESSAGE_CONNECTION_ID	= 1,
        VMBUS_MESSAGE_CONNECTION_ID_4	= 4,
        VMBUS_MESSAGE_PORT_ID		= 1,
        VMBUS_EVENT_CONNECTION_ID	= 2,
        VMBUS_EVENT_PORT_ID		= 2,
        VMBUS_MONITOR_CONNECTION_ID	= 3,
        VMBUS_MONITOR_PORT_ID		= 3,
        VMBUS_MESSAGE_SINT		= 2,
};

/*
 * Per cpu state for channel handling
 */
struct hv_per_cpu_context {
        void *synic_message_page;
        void *synic_event_page;
        /*
         * buffer to post messages to the host.
         */
        void *post_msg_page;
        /*
         * Starting with win8, we can take channel interrupts on any CPU;
         * we will manage the tasklet that handles event messages on a
         * per-CPU basis.
         */
        struct tasklet_struct msg_dpc;
};

struct hv_context {
        /* We only support running on top of Hyper-V,
         * so at this point this really can only contain the Hyper-V ID.
         */
        u64 guestid;

        struct hv_per_cpu_context __percpu *cpu_context;

        /*
         * To manage allocations in a NUMA node.
         * Array indexed by numa node ID.
         */
        struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
                         enum hv_message_type message_type,
                         void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 pagecnt, u32 max_pkt_size);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count,
                        u64 requestid, u64 *trans_id);

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw);
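
/*
 * Illustrative addition, not part of the original header: a minimal sketch
 * of how a caller might feed hv_ringbuffer_write() with a scatter list.
 * In-tree drivers normally go through vmbus_sendpacket() and friends rather
 * than calling this directly; the "desc"/"data" parameters and the function
 * name below are hypothetical.
 */
#if 0 /* example only */
static int example_ring_write(struct vmbus_channel *channel,
                              void *desc, u32 desclen,
                              void *data, u32 datalen, u64 requestid)
{
        struct kvec kv[2] = {
                { .iov_base = desc, .iov_len = desclen },
                { .iov_base = data, .iov_len = datalen },
        };
        u64 trans_id;

        /* Copies both vectors into the outbound ring; may signal the host. */
        return hv_ringbuffer_write(channel, kv, ARRAY_SIZE(kv),
                                   requestid, &trans_id);
}
#endif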

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE: one half of HV_HYP_PAGE_SIZE is
 * used to send endpoint interrupts, and the other half is used to receive
 * endpoint interrupts.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)
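
/*
 * Worked example (editorial note, not part of the original header): with a
 * 4096-byte HV_HYP_PAGE_SIZE, half of the interrupt page is 2048 bytes per
 * direction, and 2048 bytes * 8 bits per byte = 16384 channel bits, which
 * is where the 16384 figure in the comment above comes from.
 */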

/* The value here must be a multiple of 32 */
#define MAX_NUM_CHANNELS_SUPPORTED	256

#define MAX_CHANNEL_RELIDS					\
        max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)

enum vmbus_connect_state {
        DISCONNECTED,
        CONNECTING,
        CONNECTED,
        DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU	0

struct vmbus_connection {
        u32 msg_conn_id;

        atomic_t offer_in_progress;

        enum vmbus_connect_state conn_state;

        atomic_t next_gpadl_handle;

        struct completion  unload_event;
        /*
         * Represents channel interrupts. Each bit position represents a
         * channel.  When a channel sends an interrupt via VMBUS, it finds its
         * bit in the sendInterruptPage, sets it and calls Hv to generate a
         * port event. The other end receives the port event and parses the
         * recvInterruptPage to see which bit is set.
         */
        void *int_page;
        void *send_int_page;
        void *recv_int_page;

        /*
         * Two pages: the first page is for parent->child notification and
         * the second is for child->parent notification.
         */
        struct hv_monitor_page *monitor_pages[2];
        void *monitor_pages_original[2];
        phys_addr_t monitor_pages_pa[2];
        struct list_head chn_msg_list;
        spinlock_t channelmsg_lock;

        /* List of channels */
        struct list_head chn_list;
        struct mutex channel_mutex;

        /* Array of channels */
        struct vmbus_channel **channels;

        /*
         * An offer message is handled first on the work_queue, and then
         * is further handled on handle_primary_chan_wq or
         * handle_sub_chan_wq.
         */
        struct workqueue_struct *work_queue;
        struct workqueue_struct *handle_primary_chan_wq;
        struct workqueue_struct *handle_sub_chan_wq;

        /*
         * The number of sub-channels and hv_sock channels that should be
         * cleaned up upon suspend: sub-channels will be re-created upon
         * resume, and hv_sock channels should not survive suspend.
         */
        atomic_t nr_chan_close_on_suspend;
        /*
         * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
         * drop to zero.
         */
        struct completion ready_for_suspend_event;

        /*
         * The number of primary channels that should be "fixed up"
         * upon resume: these channels are re-offered upon resume, and some
         * fields of the channel offers (i.e. child_relid and connection_id)
         * can change, so the old offermsg must be fixed up, before the resume
         * callbacks of the VSC drivers start to further touch the channels.
         */
        atomic_t nr_chan_fixup_on_resume;
        /*
         * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
         * drop to zero.
         */
        struct completion ready_for_resume_event;
};


struct vmbus_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglist_entry;

        /* The message itself */
        unsigned char msg[];
};


extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

static inline void vmbus_send_interrupt(u32 relid)
{
        sync_set_bit(relid, vmbus_connection.send_int_page);
}
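
/*
 * Illustrative addition, not part of the original header: a simplified
 * sketch of the receive-side counterpart of vmbus_send_interrupt().  The
 * real dispatch happens in the VMBus event-handling paths (e.g.
 * vmbus_on_event(), declared below); locking and SynIC details are omitted
 * and the function name is hypothetical.
 */
#if 0 /* example only */
static void example_scan_recv_page(void)
{
        struct vmbus_channel *channel;
        u32 relid;

        for (relid = 1; relid < MAX_NUM_CHANNELS; relid++) {
                if (!sync_test_and_clear_bit(relid,
                                             vmbus_connection.recv_int_page))
                        continue;
                /* Map the relid back to its channel and let it process events. */
                channel = relid2channel(relid);
                if (channel && channel->onchannel_callback)
                        channel->onchannel_callback(channel->channel_callback_context);
        }
}
#endif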

enum vmbus_message_handler_type {
        /* The related handler can sleep. */
        VMHT_BLOCKING = 0,

        /* The related handler must NOT sleep. */
        VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
        enum vmbus_channel_message_type message_type;
        enum vmbus_message_handler_type handler_type;
        void (*message_handler)(struct vmbus_channel_message_header *msg);
        u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
        channel_message_table[CHANNELMSG_COUNT];


/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
                                      const guid_t *instance,
                                      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
                           struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
int hv_fcopy_pre_suspend(void);
int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
                                   void (*cb)(void *))
{
        if (!channel)
                return;
        cb(channel);
}

enum hvutil_device_state {
        HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
        HVUTIL_READY,            /* userspace is registered */
        HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
        HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
        HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
        HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};
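
/*
 * Editorial note, not part of the original header: for the util services
 * (KVP/VSS/fcopy) a typical transaction roughly walks the states above as
 *
 *   HVUTIL_DEVICE_INIT -> HVUTIL_READY          (userspace daemon registers)
 *   HVUTIL_READY -> HVUTIL_HOSTMSG_RECEIVED     (request arrives from host)
 *   HVUTIL_HOSTMSG_RECEIVED -> HVUTIL_USERSPACE_REQ -> HVUTIL_USERSPACE_RECV
 *   HVUTIL_USERSPACE_RECV -> HVUTIL_READY       (reply forwarded to the host)
 *
 * with HVUTIL_DEVICE_DYING entered on driver unload.
 */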

enum delay {
        INTERRUPT_DELAY = 0,
        MESSAGE_DELAY   = 1,
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
        return vmbus_devs[channel->device_id].perf_device;
}

static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
        struct vmbus_channel *channel, *sc;

        lockdep_assert_held(&vmbus_connection.channel_mutex);
        /*
         * List additions/deletions as well as updates of the target CPUs are
         * protected by channel_mutex.
         */
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (!hv_is_perf_channel(channel))
                        continue;
                if (channel->target_cpu == cpu)
                        return true;
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu)
                                return true;
                }
        }
        return false;
}

static inline void hv_set_allocated_cpu(unsigned int cpu)
{
        cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
        if (hv_is_allocated_cpu(cpu))
                return;
        cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_allocated_cpus(unsigned int old_cpu,
                                          unsigned int new_cpu)
{
        hv_set_allocated_cpu(new_cpu);
        hv_clear_allocated_cpu(old_cpu);
}
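
/*
 * Illustrative addition, not part of the original header: a sketch of how
 * the helpers above might be used when re-targeting a performance-critical
 * channel to another CPU.  The policy of only moving to a CPU that is not
 * already allocated is an assumption for the example, and the function name
 * is hypothetical; the point is that the NUMA bookkeeping and target_cpu
 * update happen under channel_mutex.
 */
#if 0 /* example only */
static void example_retarget_channel(struct vmbus_channel *channel,
                                     unsigned int new_cpu)
{
        mutex_lock(&vmbus_connection.channel_mutex);
        if (hv_is_perf_channel(channel) && !hv_is_allocated_cpu(new_cpu)) {
                hv_update_allocated_cpus(channel->target_cpu, new_cpu);
                channel->target_cpu = new_cpu;
        }
        mutex_unlock(&vmbus_connection.channel_mutex);
}
#endif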

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
                                       enum delay delay_type) {};
static inline int hv_debug_init(void)
{
        return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
        return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

#endif /* _HYPERV_VMBUS_H */