cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vbva_base.c (5366B)


// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there, serialized by vbva_buffer_begin_update
 * and vbva_buffer_end_update.
 *
 * free_offset is the write position, data_offset is the read position.
 * free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset while data
 * is in the buffer.
 * The guest only changes free_offset, the host changes data_offset.
 */

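/* Bytes available for writing between free_offset and data_offset, taking wrap-around into account. */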
static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}

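/*
 * Copy len bytes from p into the ring buffer at offset, splitting the copy
 * in two pieces when it would run past the end of the data area.
 */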
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}
}

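/* Ask the host to process the data already queued in the ring buffer. */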
static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p)
		return;

	p->reserved = 0;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}

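/*
 * Append len bytes to the record opened by vbva_buffer_begin_update,
 * flushing to the host whenever the ring buffer runs short of space.
 */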
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;

	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;

	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		if (chunk >= available) {
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}

		if (chunk >= available) {
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			chunk = available - vbva->partial_write_tresh;
		}

		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);

		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		p += chunk;
	}

	return true;
}

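/* Send a VBVA_ENABLE command to switch VBVA on or off for the given screen. */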
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

	if (enable)
		ret = p->base.result >= 0;
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}

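/* Initialize the ring buffer and tell the host that VBVA is enabled. */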
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool ret = false;

	memset(vbva, 0, sizeof(*vbva));
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!ret)
		vbva_disable(vbva_ctx, ctx, screen);

	return ret;
}

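/* Drop the guest-side state and tell the host that VBVA is disabled. */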
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}

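/*
 * Open a new record in the records queue; subsequent vbva_write calls append
 * to it until vbva_buffer_end_update marks it complete.
 */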
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used. */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If there is still no free slot after the flush, fail the request. */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}

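/* Close the current record so the host may treat it as complete. */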
void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;

	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}

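/* Record the ring buffer offset and size for a later vbva_enable. */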
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}
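
For orientation, here is a minimal sketch of how these entry points chain
together from a caller's point of view. The function and variable names below
(submit_command, my_ctx, guest_pool, cmd, cmd_len) are hypothetical and do not
appear in this file; the sketch only assumes an initialized struct
vbva_buf_ctx and the HGSMI guest pool used above.

static bool submit_command(struct vbva_buf_ctx *my_ctx,
			   struct gen_pool *guest_pool,
			   const void *cmd, u32 cmd_len)
{
	bool ret;

	/* Open a record; fails if VBVA is disabled or the record queue is full. */
	if (!vbva_buffer_begin_update(my_ctx, guest_pool))
		return false;

	/* Copy the command into the ring buffer, flushing to the host as needed. */
	ret = vbva_write(my_ctx, guest_pool, cmd, cmd_len);

	/* Clear the partial flag so the host may consume the record. */
	vbva_buffer_end_update(my_ctx);

	return ret;
}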