cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ice_vf_mbx.c (18211B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_vf_mbx.h"

/**
 * ice_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: VF ID to send msg
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send a message to the VF driver (0x0802) using the mailbox
 * queue; the message is sent asynchronously via the
 * ice_sq_send_cmd() function.
 */
int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
		      u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
	struct ice_aqc_pf_vf_msg *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);

	cmd = &desc.params.virt;
	cmd->id = cpu_to_le32(vfid);

	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);

	if (msglen)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}

/**
 * ice_conv_link_speed_to_virtchnl
 * @adv_link_support: determines the format of the returned link speed
 * @link_speed: variable containing the link_speed to be converted
 *
 * Convert link speed supported by HW to link speed supported by virtchnl.
 * If adv_link_support is true, then return the link speed in Mbps. Otherwise
 * return the link speed as a VIRTCHNL_LINK_SPEED_* value cast to a u32. Note
 * that the caller needs to cast back to an enum virtchnl_link_speed in the
 * case where adv_link_support is false, but when adv_link_support is true the
 * caller can expect the speed in Mbps.
 */
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	u32 speed;

	if (adv_link_support)
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_10MB:
			speed = ICE_LINK_SPEED_10MBPS;
			break;
		case ICE_AQ_LINK_SPEED_100MB:
			speed = ICE_LINK_SPEED_100MBPS;
			break;
		case ICE_AQ_LINK_SPEED_1000MB:
			speed = ICE_LINK_SPEED_1000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			speed = ICE_LINK_SPEED_2500MBPS;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			speed = ICE_LINK_SPEED_5000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			speed = ICE_LINK_SPEED_10000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_20GB:
			speed = ICE_LINK_SPEED_20000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			speed = ICE_LINK_SPEED_25000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			speed = ICE_LINK_SPEED_40000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_50GB:
			speed = ICE_LINK_SPEED_50000MBPS;
			break;
		case ICE_AQ_LINK_SPEED_100GB:
			speed = ICE_LINK_SPEED_100000MBPS;
			break;
		default:
			speed = ICE_LINK_SPEED_UNKNOWN;
			break;
		}
	else
		/* Virtchnl speeds are not defined for every speed supported in
		 * the hardware. To maintain compatibility with older AVF
		 * drivers, when reporting the link speed, new speed values are
		 * resolved to the closest known virtchnl speed.
		 */
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_10MB:
		case ICE_AQ_LINK_SPEED_100MB:
			speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
			break;
		case ICE_AQ_LINK_SPEED_1000MB:
		case ICE_AQ_LINK_SPEED_2500MB:
		case ICE_AQ_LINK_SPEED_5GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
			break;
		case ICE_AQ_LINK_SPEED_20GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
		case ICE_AQ_LINK_SPEED_50GB:
		case ICE_AQ_LINK_SPEED_100GB:
			speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
			break;
		default:
			speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
			break;
		}

	return speed;
}
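
/* Illustrative sketch (hypothetical caller code, not part of this file): on
 * the legacy path (adv_link_support == false) the caller is expected to cast
 * the returned u32 back to the virtchnl enum, e.g.:
 *
 *	u32 raw = ice_conv_link_speed_to_virtchnl(false, link_speed);
 *	enum virtchnl_link_speed vspeed = (enum virtchnl_link_speed)raw;
 *
 * With adv_link_support == true the return value is already the link speed
 * in Mbps and can be used directly.
 */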

/* The mailbox overflow detection algorithm helps to check if there
 * is a possibility of a malicious VF transmitting too many MBX messages to the
 * PF.
 * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
 * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
 * The struct ice_mbx_snapshot helps to track and traverse a static window of
 * messages within the mailbox queue while looking for a malicious VF.
 *
 * 2. When the caller starts processing its mailbox queue in response to an
 * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
 * the algorithm can be run for the first time for that interrupt. This can be
 * done via ice_mbx_reset_snapshot().
 *
 * 3. For every message read by the caller from the MBX Queue, the caller must
 * call the detection algorithm's entry function ice_mbx_vf_state_handler().
 * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
 * filled as it is required to be passed to the algorithm.
 *
 * 4. Every time a message is read from the MBX queue, a VFId is received which
 * is passed to the state handler. The boolean output is_malvf of the state
 * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
 * whether this VF is malicious or not.
 *
 * 5. When a VF is identified to be malicious, the caller can send a message
 * to the system administrator. The caller can invoke ice_mbx_report_malvf()
 * to help determine if a malicious VF is to be reported or not. This function
 * requires the caller to maintain a global bitmap to track all malicious VFs
 * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
 * to be malicious by ice_mbx_vf_state_handler().
 *
 * 6. The global bitmap maintained by PF can be cleared completely if PF is in
 * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
 * When a VF is shut down and brought back up, we assume that the new VF is
 * not malicious, and hence it is reported again if it is later found to be malicious.
 *
 * 7. The function ice_mbx_reset_snapshot() is called to reset the information
 * in ice_mbx_snapshot for every new mailbox interrupt handled.
 *
 * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
 * when the driver is unloaded.
 */
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* Use the highest unsigned 16-bit value, 0xFFFF, to indicate that the
 * max-messages check must be ignored in the algorithm.
 */
#define ICE_IGNORE_MAX_MSG_CNT	0xFFFF
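
/* Illustrative caller-side sketch (hypothetical code, not part of this file),
 * assuming a PF mailbox interrupt handler that has just read one message for
 * "vf_id" and maintains a global "malvfs" bitmap as described in the steps
 * above; "pending", "num_processed", "mbx_size", "watermark" and "bitmap_len"
 * stand in for the caller's own bookkeeping:
 *
 *	struct ice_mbx_data mbx_data = {
 *		.num_msg_proc = num_processed,
 *		.num_pending_arq = pending,
 *		.max_num_msgs_mbx = mbx_size,
 *		.async_watermark_val = watermark,
 *	};
 *	bool is_malvf = false, report = false;
 *
 *	if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf) &&
 *	    is_malvf &&
 *	    !ice_mbx_report_malvf(hw, malvfs, bitmap_len, vf_id, &report) &&
 *	    report)
 *		dev_warn(ice_hw_to_dev(hw), "VF %u is malicious\n", vf_id);
 */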

/**
 * ice_mbx_traverse - Pass through mailbox snapshot
 * @hw: pointer to the HW struct
 * @new_state: new algorithm state
 *
 * Traversing the mailbox static snapshot without checking
 * for malicious VFs.
 */
static void
ice_mbx_traverse(struct ice_hw *hw,
		 enum ice_mbx_snapshot_state *new_state)
{
	struct ice_mbx_snap_buffer_data *snap_buf;
	u32 num_iterations;

	snap_buf = &hw->mbx_snapshot.mbx_buf;

	/* Since the mailbox buffer is circular, apply a mask to the
	 * incremented iteration count.
	 */
	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);

	/* Check either of the below conditions to exit snapshot traversal:
	 * Condition-1: the number of iterations in the mailbox is equal to
	 * the mailbox head, which indicates that we have reached the end
	 * of the static snapshot.
	 * Condition-2: if the maximum number of messages serviced in the
	 * mailbox for a given interrupt is the highest possible value, then
	 * there is no need to check if the number of messages processed is
	 * equal to it. If not, check if the number of messages processed is
	 * greater than or equal to the maximum number of mailbox entries
	 * serviced in the current work item.
	 */
	if (num_iterations == snap_buf->head ||
	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_detect_malvf - Detect malicious VF in snapshot
 * @hw: pointer to the HW struct
 * @vf_id: relative virtual function ID
 * @new_state: new algorithm state
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * This function tracks the number of asynchronous messages
 * sent per VF and marks the VF as malicious if it exceeds
 * the permissible number of messages to send.
 */
static int
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
		     enum ice_mbx_snapshot_state *new_state,
		     bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	if (vf_id >= snap->mbx_vf.vfcntr_len)
		return -EIO;

	/* increment the message count in the VF array */
	snap->mbx_vf.vf_cntr[vf_id]++;

	if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
		*is_malvf = true;

	/* continue to iterate through the mailbox snapshot */
	ice_mbx_traverse(hw, new_state);

	return 0;
}

/**
 * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
 * @snap: pointer to mailbox snapshot structure in the ice_hw struct
 *
 * Reset the mailbox snapshot structure and clear VF counter array.
 */
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
	u32 vfcntr_len;

	if (!snap || !snap->mbx_vf.vf_cntr)
		return;

	/* Clear VF counters. */
	vfcntr_len = snap->mbx_vf.vfcntr_len;
	if (vfcntr_len)
		memset(snap->mbx_vf.vf_cntr, 0,
		       (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));

	/* Reset mailbox snapshot for a new capture. */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
 * @hw: pointer to the HW struct
 * @mbx_data: pointer to structure containing mailbox data
 * @vf_id: relative virtual function (VF) ID
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * The function serves as an entry point for the malicious VF
 * detection algorithm by handling the different states and state
 * transitions of the algorithm:
 * New snapshot: This state is entered when creating a new static
 * snapshot. The data from any previous mailbox snapshot is
 * cleared and a new capture of the mailbox head and tail is
 * logged. This will be the new static snapshot used to detect
 * asynchronous messages sent by VFs. Once the snapshot is captured,
 * depending on whether the number of pending messages in that
 * snapshot exceeds the watermark value, the state machine enters
 * the traverse or detect state.
 * Traverse: If the pending message count is below the watermark,
 * iterate through the snapshot without taking any action on the VF.
 * Detect: If the pending message count exceeds the watermark, traverse
 * the static snapshot and look for a malicious VF.
 */
int
ice_mbx_vf_state_handler(struct ice_hw *hw,
			 struct ice_mbx_data *mbx_data, u16 vf_id,
			 bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	struct ice_mbx_snap_buffer_data *snap_buf;
	struct ice_ctl_q_info *cq = &hw->mailboxq;
	enum ice_mbx_snapshot_state new_state;
	int status = 0;

	if (!is_malvf || !mbx_data)
		return -EINVAL;

	/* When entering the mailbox state machine assume that the VF
	 * is not malicious until detected.
	 */
	*is_malvf = false;

	/* Check that the maximum number of messages allowed to be processed
	 * while servicing the current interrupt is greater than the defined
	 * AVF message threshold.
	 */
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return -EINVAL;

	/* The watermark value should not be less than the threshold limit
	 * set for the number of asynchronous messages a VF can send to the
	 * mailbox, nor should it be greater than the maximum number of
	 * messages in the mailbox serviced in the current interrupt.
	 */
	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return -EINVAL;

	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
	snap_buf = &snap->mbx_buf;

	switch (snap_buf->state) {
	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
		/* Clear any previously held data in the mailbox snapshot structure. */
		ice_mbx_reset_snapshot(snap);

		/* Collect the pending ARQ count, the number of messages processed
		 * and the maximum number of messages allowed to be processed from
		 * the mailbox for the current interrupt.
		 */
		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;

		/* Capture a new static snapshot of the mailbox by logging the
		 * head and tail of the snapshot and setting num_iterations to the
		 * tail value to mark the start of the iteration through the snapshot.
		 */
		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
						  mbx_data->num_pending_arq);
		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
		snap_buf->num_iterations = snap_buf->tail;

		/* The number of pending ARQ messages returned by ice_clean_rq_elem
		 * is the difference between the head and tail of the
		 * mailbox queue. Comparing this value against the watermark
		 * helps to check if we potentially have malicious VFs.
		 */
		if (snap_buf->num_pending_arq >=
		    mbx_data->async_watermark_val) {
			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
			status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		} else {
			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
			ice_mbx_traverse(hw, &new_state);
		}
		break;

	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
		ice_mbx_traverse(hw, &new_state);
		break;

	case ICE_MAL_VF_DETECT_STATE_DETECT:
		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
		status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		break;

	default:
		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
		status = -EIO;
	}

	snap_buf->state = new_state;

	return status;
}

/**
 * ice_mbx_report_malvf - Track and note malicious VF
 * @hw: pointer to the HW struct
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 * @report_malvf: boolean to indicate if malicious VF must be reported
 *
 * This function updates a bitmap that keeps track of the malicious
 * VFs attached to the PF. A malicious VF must be reported only once
 * between VF resets or driver loads, so the function checks the input
 * vf_id against the bitmap to verify whether the VF has already been
 * detected in a previous mailbox iteration.
 */
int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
		     u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
	if (!all_malvfs || !report_malvf)
		return -EINVAL;

	*report_malvf = false;

	if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
		return -EINVAL;

	if (vf_id >= bitmap_len)
		return -EIO;

	/* Set the VF's bit in the bitmap; report it only if the bit was not already set. */
	if (!test_and_set_bit(vf_id, all_malvfs))
		*report_malvf = true;

	return 0;
}

/**
 * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
 * @snap: pointer to the mailbox snapshot structure
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 *
 * In case of a VF reset, this function can be called to clear
 * the bit corresponding to the VF ID in the bitmap tracking all
 * malicious VFs attached to the PF. The function also clears the
 * VF counter array at the index of the VF ID. This is to ensure
 * that the new VF loaded is not considered malicious before going
 * through the overflow detection algorithm.
 */
int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
		    u16 bitmap_len, u16 vf_id)
{
	if (!snap || !all_malvfs)
		return -EINVAL;

	if (bitmap_len < snap->mbx_vf.vfcntr_len)
		return -EINVAL;

	/* Ensure VF ID value is not larger than bitmap or VF counter length */
	if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
		return -EIO;

	/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
	clear_bit(vf_id, all_malvfs);

	/* Clear the VF counter in the mailbox snapshot structure for that VF ID.
	 * This ensures that if a VF is unloaded and a new one is brought back
	 * up with the same VF ID while a snapshot is in the traverse or detect
	 * state, the counter for that VF ID does not increment on top of existing
	 * values in the mailbox overflow detection algorithm.
	 */
	snap->mbx_vf.vf_cntr[vf_id] = 0;

	return 0;
}
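
/* Illustrative sketch (hypothetical caller code): on a VF reset the PF is
 * expected to drop its record of that VF so the fresh VF starts clean. With
 * a global bitmap declared as, for example,
 *
 *	DECLARE_BITMAP(malvfs, MAX_VFS);
 *
 * a reset path could call
 *
 *	ice_mbx_clear_malvf(&hw->mbx_snapshot, malvfs, MAX_VFS, vf_id);
 *
 * MAX_VFS is a stand-in for however many VFs the caller actually tracks.
 */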

/**
 * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
 * @hw: pointer to the hardware structure
 * @vf_count: number of VFs allocated on a PF
 *
 * Clear the mailbox snapshot structure and allocate memory
 * for the VF counter array based on the number of VFs allocated
 * on that PF.
 *
 * Assumption: This function will assume ice_get_caps() has already been
 * called to ensure that the vf_count can be compared against the number
 * of VFs supported as defined in the functional capabilities of the device.
 */
int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Ensure that the number of VFs allocated is non-zero and
	 * is not greater than the number of supported VFs defined in
	 * the functional capabilities of the PF.
	 */
	if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
		return -EINVAL;

	snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
					    sizeof(*snap->mbx_vf.vf_cntr),
					    GFP_KERNEL);
	if (!snap->mbx_vf.vf_cntr)
		return -ENOMEM;

	/* Set the VF counter length to the number of VFs allocated
	 * per the given PF's functional capabilities.
	 */
	snap->mbx_vf.vfcntr_len = vf_count;

	/* Clear mbx_buf in the mailbox snapshot structure and set the
	 * mailbox snapshot state to a new capture.
	 */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

	return 0;
}

/**
 * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
 * @hw: pointer to the hardware structure
 *
 * Clear the mailbox snapshot structure and free the VF counter array.
 */
void ice_mbx_deinit_snapshot(struct ice_hw *hw)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Free VF counter array and reset VF counter length */
	devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
	snap->mbx_vf.vfcntr_len = 0;

	/* Clear mbx_buf in the mailbox snapshot structure */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
}
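
/* Illustrative lifecycle sketch (hypothetical caller code): the snapshot is
 * set up once the number of VFs is known and torn down when the VFs (or the
 * driver) are removed, e.g.:
 *
 *	int err = ice_mbx_init_snapshot(hw, num_vfs);
 *
 *	if (err)
 *		return err;
 *	...
 *	ice_mbx_deinit_snapshot(hw);
 *
 * num_vfs stands in for the caller's allocated VF count; ice_mbx_init_snapshot
 * can fail with -EINVAL or -ENOMEM, so callers should propagate its return value.
 */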