cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

hypervisor.h (117649B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _SPARC64_HYPERVISOR_H
      3#define _SPARC64_HYPERVISOR_H
      4
      5/* Sun4v hypervisor interfaces and defines.
      6 *
      7 * Hypervisor calls are made via traps to software trap numbers 0x80
      8 * and above.  Registers %o0 to %o5 serve as argument, status, and
      9 * return value registers.
     10 *
     11 * There are two kinds of these traps.  First there are the normal
     12 * "fast traps" which use software trap 0x80 and encode the function
     13 * to invoke by number in register %o5.  Argument and return value
     14 * handling is as follows:
     15 *
     16 * -----------------------------------------------
     17 * |  %o5  | function number |     undefined     |
     18 * |  %o0  |   argument 0    |   return status   |
     19 * |  %o1  |   argument 1    |   return value 1  |
     20 * |  %o2  |   argument 2    |   return value 2  |
     21 * |  %o3  |   argument 3    |   return value 3  |
     22 * |  %o4  |   argument 4    |   return value 4  |
     23 * -----------------------------------------------
     24 *
     25 * The second type are "hyper-fast traps" which encode the function
     26 * number in the software trap number itself.  So these use trap
     27 * numbers > 0x80.  The register usage for hyper-fast traps is as
     28 * follows:
     29 *
     30 * -----------------------------------------------
     31 * |  %o0  |   argument 0    |   return status   |
     32 * |  %o1  |   argument 1    |   return value 1  |
     33 * |  %o2  |   argument 2    |   return value 2  |
     34 * |  %o3  |   argument 3    |   return value 3  |
     35 * |  %o4  |   argument 4    |   return value 4  |
     36 * -----------------------------------------------
     37 *
     38 * Registers providing explicit arguments to the hypervisor calls
     39 * are volatile across the call.  Upon return their values are
     40 * undefined unless explicitly specified as containing a particular
     41 * return value by the specific call.  The return status is always
     42 * returned in register %o0, zero indicates a successful execution of
     43 * the hypervisor call and other values indicate an error status as
     44 * defined below.  So, for example, if a hyper-fast trap takes
     45 * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
     46 * the call and %o3, %o4, and %o5 would be preserved.
     47 *
     48 * If the hypervisor trap is invalid, or the fast trap function number
     49 * is invalid, HV_EBADTRAP will be returned in %o0.  Also, all 64-bits
     50 * of the argument and return values are significant.
     51 */
     52
     53/* Trap numbers.  */
     54#define HV_FAST_TRAP		0x80
     55#define HV_MMU_MAP_ADDR_TRAP	0x83
     56#define HV_MMU_UNMAP_ADDR_TRAP	0x84
     57#define HV_TTRACE_ADDENTRY_TRAP	0x85
     58#define HV_CORE_TRAP		0xff
     59
     60/* Error codes.  */
     61#define HV_EOK				0  /* Successful return            */
     62#define HV_ENOCPU			1  /* Invalid CPU id               */
     63#define HV_ENORADDR			2  /* Invalid real address         */
     64#define HV_ENOINTR			3  /* Invalid interrupt id         */
     65#define HV_EBADPGSZ			4  /* Invalid pagesize encoding    */
     66#define HV_EBADTSB			5  /* Invalid TSB description      */
     67#define HV_EINVAL			6  /* Invalid argument             */
     68#define HV_EBADTRAP			7  /* Invalid function number      */
     69#define HV_EBADALIGN			8  /* Invalid address alignment    */
     70#define HV_EWOULDBLOCK			9  /* Cannot complete w/o blocking */
     71#define HV_ENOACCESS			10 /* No access to resource        */
     72#define HV_EIO				11 /* I/O error                    */
     73#define HV_ECPUERROR			12 /* CPU in error state           */
     74#define HV_ENOTSUPPORTED		13 /* Function not supported       */
     75#define HV_ENOMAP			14 /* No mapping found             */
     76#define HV_ETOOMANY			15 /* Too many items specified     */
     77#define HV_ECHANNEL			16 /* Invalid LDC channel          */
     78#define HV_EBUSY			17 /* Resource busy                */
     79#define HV_EUNAVAILABLE			23 /* Resource or operation not
     80					    * currently available, but may
     81					    * become available in the future
     82					    */
     83
     84/* mach_exit()
     85 * TRAP:	HV_FAST_TRAP
     86 * FUNCTION:	HV_FAST_MACH_EXIT
     87 * ARG0:	exit code
     88 * ERRORS:	This service does not return.
     89 *
     90 * Stop all CPUs in the virtual domain and place them into the stopped
     91 * state.  The 64-bit exit code may be passed to a service entity as
     92 * the domain's exit status.  On systems without a service entity, the
     93 * domain will undergo a reset, and the boot firmware will be
     94 * reloaded.
     95 *
     96 * This function will never return to the guest that invokes it.
     97 *
     98 * Note: By convention an exit code of zero denotes a successful exit by
     99 *       the guest code.  A non-zero exit code denotes a guest specific
    100 *       error indication.
    101 *
    102 */
    103#define HV_FAST_MACH_EXIT		0x00
    104
    105#ifndef __ASSEMBLY__
    106void sun4v_mach_exit(unsigned long exit_code);
    107#endif
    108
    109/* Domain services.  */
    110
    111/* mach_desc()
    112 * TRAP:	HV_FAST_TRAP
    113 * FUNCTION:	HV_FAST_MACH_DESC
    114 * ARG0:	buffer
    115 * ARG1:	length
    116 * RET0:	status
    117 * RET1:	length
    118 * ERRORS:	HV_EBADALIGN	Buffer is badly aligned
     119 *		HV_ENORADDR	Buffer points to an illegal real address.
    120 *		HV_EINVAL	Buffer length is too small for complete
    121 *				machine description.
    122 *
    123 * Copy the most current machine description into the buffer indicated
    124 * by the real address in ARG0.  The buffer provided must be 16 byte
    125 * aligned.  Upon success or HV_EINVAL, this service returns the
    126 * actual size of the machine description in the RET1 return value.
    127 *
    128 * Note: A method of determining the appropriate buffer size for the
    129 *       machine description is to first call this service with a buffer
    130 *       length of 0 bytes.
    131 */
    132#define HV_FAST_MACH_DESC		0x01
    133
    134#ifndef __ASSEMBLY__
    135unsigned long sun4v_mach_desc(unsigned long buffer_pa,
    136			      unsigned long buf_len,
    137			      unsigned long *real_buf_len);
    138#endif
    139
    140/* mach_sir()
    141 * TRAP:	HV_FAST_TRAP
    142 * FUNCTION:	HV_FAST_MACH_SIR
    143 * ERRORS:	This service does not return.
    144 *
    145 * Perform a software initiated reset of the virtual machine domain.
    146 * All CPUs are captured as soon as possible, all hardware devices are
    147 * returned to the entry default state, and the domain is restarted at
    148 * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
    149 * of the CPUs.  The single CPU restarted is selected as determined by
    150 * platform specific policy.  Memory is preserved across this
    151 * operation.
    152 */
    153#define HV_FAST_MACH_SIR		0x02
    154
    155#ifndef __ASSEMBLY__
    156void sun4v_mach_sir(void);
    157#endif
    158
    159/* mach_set_watchdog()
    160 * TRAP:	HV_FAST_TRAP
    161 * FUNCTION:	HV_FAST_MACH_SET_WATCHDOG
    162 * ARG0:	timeout in milliseconds
    163 * RET0:	status
    164 * RET1:	time remaining in milliseconds
    165 *
     166 * A guest uses this API to set a watchdog timer.  Once the guest has set
    167 * the timer, it must call the timer service again either to disable or
    168 * postpone the expiration.  If the timer expires before being reset or
    169 * disabled, then the hypervisor take a platform specific action leading
    170 * to guest termination within a bounded time period.  The platform action
    171 * may include recovery actions such as reporting the expiration to a
     172 * Service Processor, and/or automatically restarting the guest.
    173 *
    174 * The 'timeout' parameter is specified in milliseconds, however the
     175 * implemented granularity is given by the 'watchdog-resolution'
    176 * property in the 'platform' node of the guest's machine description.
    177 * The largest allowed timeout value is specified by the
    178 * 'watchdog-max-timeout' property of the 'platform' node.
    179 *
    180 * If the 'timeout' argument is not zero, the watchdog timer is set to
    181 * expire after a minimum of 'timeout' milliseconds.
    182 *
    183 * If the 'timeout' argument is zero, the watchdog timer is disabled.
    184 *
     185 * If the 'timeout' value exceeds the value of the 'watchdog-max-timeout'
    186 * property, the hypervisor leaves the watchdog timer state unchanged,
    187 * and returns a status of EINVAL.
    188 *
    189 * The 'time remaining' return value is valid regardless of whether the
    190 * return status is EOK or EINVAL.  A non-zero return value indicates the
    191 * number of milliseconds that were remaining until the timer was to expire.
    192 * If less than one millisecond remains, the return value is '1'.  If the
    193 * watchdog timer was disabled at the time of the call, the return value is
    194 * zero.
    195 *
    196 * If the hypervisor cannot support the exact timeout value requested, but
    197 * can support a larger timeout value, the hypervisor may round the actual
    198 * timeout to a value larger than the requested timeout, consequently the
    199 * 'time remaining' return value may be larger than the previously requested
    200 * timeout value.
    201 *
    202 * Any guest OS debugger should be aware that the watchdog service may be in
    203 * use.  Consequently, it is recommended that the watchdog service is
    204 * disabled upon debugger entry (e.g. reaching a breakpoint), and then
    205 * re-enabled upon returning to normal execution.  The API has been designed
    206 * with this in mind, and the 'time remaining' result of the disable call may
    207 * be used directly as the timeout argument of the re-enable call.
    208 */
    209#define HV_FAST_MACH_SET_WATCHDOG	0x05
    210
    211#ifndef __ASSEMBLY__
    212unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
    213				      unsigned long *orig_timeout);
    214#endif
    215
    216/* CPU services.
    217 *
    218 * CPUs represent devices that can execute software threads.  A single
    219 * chip that contains multiple cores or strands is represented as
    220 * multiple CPUs with unique CPU identifiers.  CPUs are exported to
    221 * OBP via the machine description (and to the OS via the OBP device
    222 * tree).  CPUs are always in one of three states: stopped, running,
    223 * or error.
    224 *
    225 * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
    226 * CPU within a logical domain.  Operations that are to be performed
    227 * on multiple CPUs specify them via a CPU list.  A CPU list is an
    228 * array in real memory, of which each 16-bit word is a CPU ID.  CPU
    229 * lists are passed through the API as two arguments.  The first is
    230 * the number of entries (16-bit words) in the CPU list, and the
    231 * second is the (real address) pointer to the CPU ID list.
    232 */
    233
    234/* cpu_start()
    235 * TRAP:	HV_FAST_TRAP
    236 * FUNCTION:	HV_FAST_CPU_START
    237 * ARG0:	CPU ID
    238 * ARG1:	PC
    239 * ARG2:	RTBA
    240 * ARG3:	target ARG0
    241 * RET0:	status
    242 * ERRORS:	ENOCPU		Invalid CPU ID
    243 *		EINVAL		Target CPU ID is not in the stopped state
    244 *		ENORADDR	Invalid PC or RTBA real address
    245 *		EBADALIGN	Unaligned PC or unaligned RTBA
    246 *		EWOULDBLOCK	Starting resources are not available
    247 *
    248 * Start CPU with given CPU ID with PC in %pc and with a real trap
    249 * base address value of RTBA.  The indicated CPU must be in the
    250 * stopped state.  The supplied RTBA must be aligned on a 256 byte
    251 * boundary.  On successful completion, the specified CPU will be in
    252 * the running state and will be supplied with "target ARG0" in %o0
    253 * and RTBA in %tba.
    254 */
    255#define HV_FAST_CPU_START		0x10
    256
    257#ifndef __ASSEMBLY__
    258unsigned long sun4v_cpu_start(unsigned long cpuid,
    259			      unsigned long pc,
    260			      unsigned long rtba,
    261			      unsigned long arg0);
    262#endif
    263
    264/* cpu_stop()
    265 * TRAP:	HV_FAST_TRAP
    266 * FUNCTION:	HV_FAST_CPU_STOP
    267 * ARG0:	CPU ID
    268 * RET0:	status
    269 * ERRORS:	ENOCPU		Invalid CPU ID
    270 *		EINVAL		Target CPU ID is the current cpu
    271 *		EINVAL		Target CPU ID is not in the running state
    272 *		EWOULDBLOCK	Stopping resources are not available
    273 *		ENOTSUPPORTED	Not supported on this platform
    274 *
    275 * The specified CPU is stopped.  The indicated CPU must be in the
    276 * running state.  On completion, it will be in the stopped state.  It
    277 * is not legal to stop the current CPU.
    278 *
    279 * Note: As this service cannot be used to stop the current cpu, this service
    280 *       may not be used to stop the last running CPU in a domain.  To stop
    281 *       and exit a running domain, a guest must use the mach_exit() service.
    282 */
    283#define HV_FAST_CPU_STOP		0x11
    284
    285#ifndef __ASSEMBLY__
    286unsigned long sun4v_cpu_stop(unsigned long cpuid);
    287#endif
    288
    289/* cpu_yield()
    290 * TRAP:	HV_FAST_TRAP
    291 * FUNCTION:	HV_FAST_CPU_YIELD
    292 * RET0:	status
    293 * ERRORS:	No possible error.
    294 *
    295 * Suspend execution on the current CPU.  Execution will resume when
    296 * an interrupt (device, %stick_compare, or cross-call) is targeted to
    297 * the CPU.  On some CPUs, this API may be used by the hypervisor to
    298 * save power by disabling hardware strands.
    299 */
    300#define HV_FAST_CPU_YIELD		0x12
    301
    302#ifndef __ASSEMBLY__
    303unsigned long sun4v_cpu_yield(void);
    304#endif
    305
    306/* cpu_poke()
    307 * TRAP:	HV_FAST_TRAP
    308 * FUNCTION:	HV_FAST_CPU_POKE
    309 * RET0:	status
    310 * ERRORS:	ENOCPU		cpuid refers to a CPU that does not exist
    311 *		EINVAL		cpuid is current CPU
    312 *
    313 * Poke CPU cpuid. If the target CPU is currently suspended having
    314 * invoked the cpu-yield service, that vCPU will be resumed.
    315 * Poke interrupts may only be sent to valid, non-local CPUs.
    316 * It is not legal to poke the current vCPU.
    317 */
    318#define HV_FAST_CPU_POKE                0x13
    319
    320#ifndef __ASSEMBLY__
    321unsigned long sun4v_cpu_poke(unsigned long cpuid);
    322#endif
    323
    324/* cpu_qconf()
    325 * TRAP:	HV_FAST_TRAP
    326 * FUNCTION:	HV_FAST_CPU_QCONF
    327 * ARG0:	queue
    328 * ARG1:	base real address
    329 * ARG2:	number of entries
    330 * RET0:	status
    331 * ERRORS:	ENORADDR	Invalid base real address
    332 *		EINVAL		Invalid queue or number of entries is less
    333 *				than 2 or too large.
    334 *		EBADALIGN	Base real address is not correctly aligned
    335 *				for size.
    336 *
    337 * Configure the given queue to be placed at the given base real
    338 * address, with the given number of entries.  The number of entries
    339 * must be a power of 2.  The base real address must be aligned
    340 * exactly to match the queue size.  Each queue entry is 64 bytes
    341 * long, so for example a 32 entry queue must be aligned on a 2048
    342 * byte real address boundary.
    343 *
    344 * The specified queue is unconfigured if the number of entries is given
    345 * as zero.
    346 *
    347 * For the current version of this API service, the argument queue is defined
    348 * as follows:
    349 *
    350 *	queue		description
    351 *	-----		-------------------------
    352 *	0x3c		cpu mondo queue
    353 *	0x3d		device mondo queue
    354 *	0x3e		resumable error queue
    355 *	0x3f		non-resumable error queue
    356 *
    357 * Note: The maximum number of entries for each queue for a specific cpu may
    358 *       be determined from the machine description.
    359 */
    360#define HV_FAST_CPU_QCONF		0x14
    361#define  HV_CPU_QUEUE_CPU_MONDO		 0x3c
    362#define  HV_CPU_QUEUE_DEVICE_MONDO	 0x3d
    363#define  HV_CPU_QUEUE_RES_ERROR		 0x3e
    364#define  HV_CPU_QUEUE_NONRES_ERROR	 0x3f
    365
    366#ifndef __ASSEMBLY__
    367unsigned long sun4v_cpu_qconf(unsigned long type,
    368			      unsigned long queue_paddr,
    369			      unsigned long num_queue_entries);
    370#endif
    371
    372/* cpu_qinfo()
    373 * TRAP:	HV_FAST_TRAP
    374 * FUNCTION:	HV_FAST_CPU_QINFO
    375 * ARG0:	queue
    376 * RET0:	status
    377 * RET1:	base real address
     378 * RET2:	number of entries
    379 * ERRORS:	EINVAL		Invalid queue
    380 *
    381 * Return the configuration info for the given queue.  The base real
    382 * address and number of entries of the defined queue are returned.
    383 * The queue argument values are the same as for cpu_qconf() above.
    384 *
    385 * If the specified queue is a valid queue number, but no queue has
    386 * been defined, the number of entries will be set to zero and the
    387 * base real address returned is undefined.
    388 */
    389#define HV_FAST_CPU_QINFO		0x15
    390
    391/* cpu_mondo_send()
    392 * TRAP:	HV_FAST_TRAP
    393 * FUNCTION:	HV_FAST_CPU_MONDO_SEND
    394 * ARG0-1:	CPU list
    395 * ARG2:	data real address
    396 * RET0:	status
    397 * ERRORS:	EBADALIGN	Mondo data is not 64-byte aligned or CPU list
    398 *				is not 2-byte aligned.
    399 *		ENORADDR	Invalid data mondo address, or invalid cpu list
    400 *				address.
    401 *		ENOCPU		Invalid cpu in CPU list
    402 *		EWOULDBLOCK	Some or all of the listed CPUs did not receive
    403 *				the mondo
    404 *		ECPUERROR	One or more of the listed CPUs are in error
    405 *				state, use HV_FAST_CPU_STATE to see which ones
    406 *		EINVAL		CPU list includes caller's CPU ID
    407 *
    408 * Send a mondo interrupt to the CPUs in the given CPU list with the
    409 * 64-bytes at the given data real address.  The data must be 64-byte
    410 * aligned.  The mondo data will be delivered to the cpu_mondo queues
    411 * of the recipient CPUs.
    412 *
    413 * In all cases, error or not, the CPUs in the CPU list to which the
    414 * mondo has been successfully delivered will be indicated by having
    415 * their entry in CPU list updated with the value 0xffff.
    416 */
    417#define HV_FAST_CPU_MONDO_SEND		0x42
    418
    419#ifndef __ASSEMBLY__
    420unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
    421				   unsigned long cpu_list_pa,
    422				   unsigned long mondo_block_pa);
    423#endif
    424
    425/* cpu_myid()
    426 * TRAP:	HV_FAST_TRAP
    427 * FUNCTION:	HV_FAST_CPU_MYID
    428 * RET0:	status
    429 * RET1:	CPU ID
    430 * ERRORS:	No errors defined.
    431 *
    432 * Return the hypervisor ID handle for the current CPU.  Use by a
     433 * virtual CPU to discover its own identity.
    434 */
    435#define HV_FAST_CPU_MYID		0x16
    436
    437/* cpu_state()
    438 * TRAP:	HV_FAST_TRAP
    439 * FUNCTION:	HV_FAST_CPU_STATE
    440 * ARG0:	CPU ID
    441 * RET0:	status
    442 * RET1:	state
    443 * ERRORS:	ENOCPU		Invalid CPU ID
    444 *
    445 * Retrieve the current state of the CPU with the given CPU ID.
    446 */
    447#define HV_FAST_CPU_STATE		0x17
    448#define  HV_CPU_STATE_STOPPED		 0x01
    449#define  HV_CPU_STATE_RUNNING		 0x02
    450#define  HV_CPU_STATE_ERROR		 0x03
    451
    452#ifndef __ASSEMBLY__
    453long sun4v_cpu_state(unsigned long cpuid);
    454#endif
    455
    456/* cpu_set_rtba()
    457 * TRAP:	HV_FAST_TRAP
    458 * FUNCTION:	HV_FAST_CPU_SET_RTBA
    459 * ARG0:	RTBA
    460 * RET0:	status
    461 * RET1:	previous RTBA
    462 * ERRORS:	ENORADDR	Invalid RTBA real address
    463 *		EBADALIGN	RTBA is incorrectly aligned for a trap table
    464 *
    465 * Set the real trap base address of the local cpu to the given RTBA.
    466 * The supplied RTBA must be aligned on a 256 byte boundary.  Upon
    467 * success the previous value of the RTBA is returned in RET1.
    468 *
    469 * Note: This service does not affect %tba
    470 */
    471#define HV_FAST_CPU_SET_RTBA		0x18
    472
     473/* cpu_get_rtba()
    474 * TRAP:	HV_FAST_TRAP
    475 * FUNCTION:	HV_FAST_CPU_GET_RTBA
    476 * RET0:	status
     477 * RET1:	current RTBA
    478 * ERRORS:	No possible error.
    479 *
    480 * Returns the current value of RTBA in RET1.
    481 */
    482#define HV_FAST_CPU_GET_RTBA		0x19
    483
    484/* MMU services.
    485 *
    486 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
    487 */
    488#ifndef __ASSEMBLY__
     489struct hv_tsb_descr {
     490	unsigned short		pgsz_idx;	/* page size index (HV_PGSZ_IDX_*); must match the
						 * smallest page size set in pgsz_mask */
     491	unsigned short		assoc;		/* TSB associativity */
     492	unsigned int		num_ttes;	/* in TTEs */
     493	unsigned int		ctx_idx;	/* context index */
     494	unsigned int		pgsz_mask;	/* bitmask of HV_PGSZ_MASK_* page sizes */
     495	unsigned long		tsb_base;	/* real address of TSB base; must be aligned
						 * to the TSB size */
     496	unsigned long		resv;		/* reserved */
     497};
    498#endif
    499#define HV_TSB_DESCR_PGSZ_IDX_OFFSET	0x00
    500#define HV_TSB_DESCR_ASSOC_OFFSET	0x02
    501#define HV_TSB_DESCR_NUM_TTES_OFFSET	0x04
    502#define HV_TSB_DESCR_CTX_IDX_OFFSET	0x08
    503#define HV_TSB_DESCR_PGSZ_MASK_OFFSET	0x0c
    504#define HV_TSB_DESCR_TSB_BASE_OFFSET	0x10
    505#define HV_TSB_DESCR_RESV_OFFSET	0x18
    506
    507/* Page size bitmask.  */
    508#define HV_PGSZ_MASK_8K			(1 << 0)
    509#define HV_PGSZ_MASK_64K		(1 << 1)
    510#define HV_PGSZ_MASK_512K		(1 << 2)
    511#define HV_PGSZ_MASK_4MB		(1 << 3)
    512#define HV_PGSZ_MASK_32MB		(1 << 4)
    513#define HV_PGSZ_MASK_256MB		(1 << 5)
    514#define HV_PGSZ_MASK_2GB		(1 << 6)
    515#define HV_PGSZ_MASK_16GB		(1 << 7)
    516
    517/* Page size index.  The value given in the TSB descriptor must correspond
    518 * to the smallest page size specified in the pgsz_mask page size bitmask.
    519 */
    520#define HV_PGSZ_IDX_8K			0
    521#define HV_PGSZ_IDX_64K			1
    522#define HV_PGSZ_IDX_512K		2
    523#define HV_PGSZ_IDX_4MB			3
    524#define HV_PGSZ_IDX_32MB		4
    525#define HV_PGSZ_IDX_256MB		5
    526#define HV_PGSZ_IDX_2GB			6
    527#define HV_PGSZ_IDX_16GB		7
    528
    529/* MMU fault status area.
    530 *
    531 * MMU related faults have their status and fault address information
    532 * placed into a memory region made available by privileged code.  Each
    533 * virtual processor must make a mmu_fault_area_conf() call to tell the
    534 * hypervisor where that processor's fault status should be stored.
    535 *
    536 * The fault status block is a multiple of 64-bytes and must be aligned
    537 * on a 64-byte boundary.
    538 */
    539#ifndef __ASSEMBLY__
     540struct hv_fault_status {
	/* Instruction-MMU fault record (offsets HV_FAULT_I_*). */
     541	unsigned long		i_fault_type;	/* HV_FAULT_TYPE_* code */
     542	unsigned long		i_fault_addr;	/* faulting address */
     543	unsigned long		i_fault_ctx;	/* mmu context of the fault */
     544	unsigned long		i_reserved[5];	/* pads I-record to 64 bytes */
	/* Data-MMU fault record (offsets HV_FAULT_D_*). */
     545	unsigned long		d_fault_type;	/* HV_FAULT_TYPE_* code */
     546	unsigned long		d_fault_addr;	/* faulting address */
     547	unsigned long		d_fault_ctx;	/* mmu context of the fault */
     548	unsigned long		d_reserved[5];	/* pads D-record to 64 bytes */
     549};
    550#endif
    551#define HV_FAULT_I_TYPE_OFFSET	0x00
    552#define HV_FAULT_I_ADDR_OFFSET	0x08
    553#define HV_FAULT_I_CTX_OFFSET	0x10
    554#define HV_FAULT_D_TYPE_OFFSET	0x40
    555#define HV_FAULT_D_ADDR_OFFSET	0x48
    556#define HV_FAULT_D_CTX_OFFSET	0x50
    557
    558#define HV_FAULT_TYPE_FAST_MISS	1
    559#define HV_FAULT_TYPE_FAST_PROT	2
    560#define HV_FAULT_TYPE_MMU_MISS	3
    561#define HV_FAULT_TYPE_INV_RA	4
    562#define HV_FAULT_TYPE_PRIV_VIOL	5
    563#define HV_FAULT_TYPE_PROT_VIOL	6
    564#define HV_FAULT_TYPE_NFO	7
    565#define HV_FAULT_TYPE_NFO_SEFF	8
    566#define HV_FAULT_TYPE_INV_VA	9
    567#define HV_FAULT_TYPE_INV_ASI	10
    568#define HV_FAULT_TYPE_NC_ATOMIC	11
    569#define HV_FAULT_TYPE_PRIV_ACT	12
    570#define HV_FAULT_TYPE_RESV1	13
    571#define HV_FAULT_TYPE_UNALIGNED	14
    572#define HV_FAULT_TYPE_INV_PGSZ	15
    573#define HV_FAULT_TYPE_MCD	17
    574#define HV_FAULT_TYPE_MCD_DIS	18
    575/* Values 16 --> -2 are reserved.  */
    576#define HV_FAULT_TYPE_MULTIPLE	-1
    577
    578/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
    579 * and mmu_{map,unmap}_perm_addr().
    580 */
    581#define HV_MMU_DMMU			0x01
    582#define HV_MMU_IMMU			0x02
    583#define HV_MMU_ALL			(HV_MMU_DMMU | HV_MMU_IMMU)
    584
    585/* mmu_map_addr()
    586 * TRAP:	HV_MMU_MAP_ADDR_TRAP
    587 * ARG0:	virtual address
    588 * ARG1:	mmu context
    589 * ARG2:	TTE
    590 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
    591 * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
    592 *		EBADPGSZ	Invalid page size value
    593 *		ENORADDR	Invalid real address in TTE
    594 *
    595 * Create a non-permanent mapping using the given TTE, virtual
    596 * address, and mmu context.  The flags argument determines which
    597 * (data, or instruction, or both) TLB the mapping gets loaded into.
    598 *
    599 * The behavior is undefined if the valid bit is clear in the TTE.
    600 *
    601 * Note: This API call is for privileged code to specify temporary translation
    602 *       mappings without the need to create and manage a TSB.
    603 */
    604
    605/* mmu_unmap_addr()
    606 * TRAP:	HV_MMU_UNMAP_ADDR_TRAP
    607 * ARG0:	virtual address
    608 * ARG1:	mmu context
    609 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
    610 * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
    611 *
    612 * Demaps the given virtual address in the given mmu context on this
    613 * CPU.  This function is intended to be used to demap pages mapped
    614 * with mmu_map_addr.  This service is equivalent to invoking
    615 * mmu_demap_page() with only the current CPU in the CPU list. The
    616 * flags argument determines which (data, or instruction, or both) TLB
    617 * the mapping gets unmapped from.
    618 *
    619 * Attempting to perform an unmap operation for a previously defined
    620 * permanent mapping will have undefined results.
    621 */
    622
    623/* mmu_tsb_ctx0()
    624 * TRAP:	HV_FAST_TRAP
    625 * FUNCTION:	HV_FAST_MMU_TSB_CTX0
    626 * ARG0:	number of TSB descriptions
    627 * ARG1:	TSB descriptions pointer
    628 * RET0:	status
    629 * ERRORS:	ENORADDR		Invalid TSB descriptions pointer or
    630 *					TSB base within a descriptor
    631 *		EBADALIGN		TSB descriptions pointer is not aligned
    632 *					to an 8-byte boundary, or TSB base
    633 *					within a descriptor is not aligned for
    634 *					the given TSB size
    635 *		EBADPGSZ		Invalid page size in a TSB descriptor
    636 *		EBADTSB			Invalid associativity or size in a TSB
    637 *					descriptor
    638 *		EINVAL			Invalid number of TSB descriptions, or
    639 *					invalid context index in a TSB
    640 *					descriptor, or index page size not
    641 *					equal to smallest page size in page
    642 *					size bitmask field.
    643 *
    644 * Configures the TSBs for the current CPU for virtual addresses with
    645 * context zero.  The TSB descriptions pointer is a pointer to an
    646 * array of the given number of TSB descriptions.
    647 *
    648 * Note: The maximum number of TSBs available to a virtual CPU is given by the
    649 *       mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
    650 *       machine description.
    651 */
    652#define HV_FAST_MMU_TSB_CTX0		0x20
    653
    654#ifndef __ASSEMBLY__
    655unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
    656				 unsigned long tsb_desc_ra);
    657#endif
    658
    659/* mmu_tsb_ctxnon0()
    660 * TRAP:	HV_FAST_TRAP
    661 * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0
    662 * ARG0:	number of TSB descriptions
    663 * ARG1:	TSB descriptions pointer
    664 * RET0:	status
    665 * ERRORS:	Same as for mmu_tsb_ctx0() above.
    666 *
    667 * Configures the TSBs for the current CPU for virtual addresses with
    668 * non-zero contexts.  The TSB descriptions pointer is a pointer to an
    669 * array of the given number of TSB descriptions.
    670 *
    671 * Note: A maximum of 16 TSBs may be specified in the TSB description list.
    672 */
    673#define HV_FAST_MMU_TSB_CTXNON0		0x21
    674
    675/* mmu_demap_page()
    676 * TRAP:	HV_FAST_TRAP
    677 * FUNCTION:	HV_FAST_MMU_DEMAP_PAGE
    678 * ARG0:	reserved, must be zero
    679 * ARG1:	reserved, must be zero
    680 * ARG2:	virtual address
    681 * ARG3:	mmu context
    682 * ARG4:	flags (HV_MMU_{IMMU,DMMU})
    683 * RET0:	status
    684 * ERRORS:	EINVAL			Invalid virtual address, context, or
    685 *					flags value
    686 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
    687 *
    688 * Demaps any page mapping of the given virtual address in the given
    689 * mmu context for the current virtual CPU.  Any virtually tagged
    690 * caches are guaranteed to be kept consistent.  The flags argument
    691 * determines which TLB (instruction, or data, or both) participate in
    692 * the operation.
    693 *
    694 * ARG0 and ARG1 are both reserved and must be set to zero.
    695 */
    696#define HV_FAST_MMU_DEMAP_PAGE		0x22
    697
    698/* mmu_demap_ctx()
    699 * TRAP:	HV_FAST_TRAP
    700 * FUNCTION:	HV_FAST_MMU_DEMAP_CTX
    701 * ARG0:	reserved, must be zero
    702 * ARG1:	reserved, must be zero
    703 * ARG2:	mmu context
    704 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
    705 * RET0:	status
    706 * ERRORS:	EINVAL			Invalid context or flags value
    707 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
    708 *
    709 * Demaps all non-permanent virtual page mappings previously specified
    710 * for the given context for the current virtual CPU.  Any virtual
    711 * tagged caches are guaranteed to be kept consistent.  The flags
    712 * argument determines which TLB (instruction, or data, or both)
    713 * participate in the operation.
    714 *
    715 * ARG0 and ARG1 are both reserved and must be set to zero.
    716 */
    717#define HV_FAST_MMU_DEMAP_CTX		0x23
    718
    719/* mmu_demap_all()
    720 * TRAP:	HV_FAST_TRAP
    721 * FUNCTION:	HV_FAST_MMU_DEMAP_ALL
    722 * ARG0:	reserved, must be zero
    723 * ARG1:	reserved, must be zero
    724 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
    725 * RET0:	status
    726 * ERRORS:	EINVAL			Invalid flags value
    727 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
    728 *
    729 * Demaps all non-permanent virtual page mappings previously specified
    730 * for the current virtual CPU.  Any virtual tagged caches are
    731 * guaranteed to be kept consistent.  The flags argument determines
    732 * which TLB (instruction, or data, or both) participate in the
    733 * operation.
    734 *
    735 * ARG0 and ARG1 are both reserved and must be set to zero.
    736 */
    737#define HV_FAST_MMU_DEMAP_ALL		0x24
    738
    739#ifndef __ASSEMBLY__
    740void sun4v_mmu_demap_all(void);
    741#endif
    742
    743/* mmu_map_perm_addr()
    744 * TRAP:	HV_FAST_TRAP
    745 * FUNCTION:	HV_FAST_MMU_MAP_PERM_ADDR
    746 * ARG0:	virtual address
    747 * ARG1:	reserved, must be zero
    748 * ARG2:	TTE
    749 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
    750 * RET0:	status
    751 * ERRORS:	EINVAL			Invalid virtual address or flags value
    752 *		EBADPGSZ		Invalid page size value
    753 *		ENORADDR		Invalid real address in TTE
    754 *		ETOOMANY		Too many mappings (max of 8 reached)
    755 *
    756 * Create a permanent mapping using the given TTE and virtual address
    757 * for context 0 on the calling virtual CPU.  A maximum of 8 such
    758 * permanent mappings may be specified by privileged code.  Mappings
    759 * may be removed with mmu_unmap_perm_addr().
    760 *
    761 * The behavior is undefined if a TTE with the valid bit clear is given.
    762 *
    763 * Note: This call is used to specify address space mappings for which
    764 *       privileged code does not expect to receive misses.  For example,
    765 *       this mechanism can be used to map kernel nucleus code and data.
    766 */
    767#define HV_FAST_MMU_MAP_PERM_ADDR	0x25
    768
    769#ifndef __ASSEMBLY__
    770unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
    771				      unsigned long set_to_zero,
    772				      unsigned long tte,
    773				      unsigned long flags);
    774#endif
    775
    776/* mmu_fault_area_conf()
    777 * TRAP:	HV_FAST_TRAP
    778 * FUNCTION:	HV_FAST_MMU_FAULT_AREA_CONF
    779 * ARG0:	real address
    780 * RET0:	status
    781 * RET1:	previous mmu fault area real address
    782 * ERRORS:	ENORADDR		Invalid real address
    783 *		EBADALIGN		Invalid alignment for fault area
    784 *
    785 * Configure the MMU fault status area for the calling CPU.  A 64-byte
    786 * aligned real address specifies where MMU fault status information
    787 * is placed.  The return value is the previously specified area, or 0
    788 * for the first invocation.  Specifying a fault area at real address
    789 * 0 is not allowed.
    790 */
    791#define HV_FAST_MMU_FAULT_AREA_CONF	0x26
    792
    793/* mmu_enable()
    794 * TRAP:	HV_FAST_TRAP
    795 * FUNCTION:	HV_FAST_MMU_ENABLE
    796 * ARG0:	enable flag
    797 * ARG1:	return target address
    798 * RET0:	status
    799 * ERRORS:	ENORADDR		Invalid real address when disabling
    800 *					translation.
    801 *		EBADALIGN		The return target address is not
    802 *					aligned to an instruction.
     803 *		EINVAL			The enable flag requests the current
    804 *					operating mode (e.g. disable if already
    805 *					disabled)
    806 *
    807 * Enable or disable virtual address translation for the calling CPU
    808 * within the virtual machine domain.  If the enable flag is zero,
    809 * translation is disabled, any non-zero value will enable
    810 * translation.
    811 *
    812 * When this function returns, the newly selected translation mode
    813 * will be active.  If the mmu is being enabled, then the return
    814 * target address is a virtual address else it is a real address.
    815 *
    816 * Upon successful completion, control will be returned to the given
    817 * return target address (ie. the cpu will jump to that address).  On
    818 * failure, the previous mmu mode remains and the trap simply returns
    819 * as normal with the appropriate error code in RET0.
    820 */
    821#define HV_FAST_MMU_ENABLE		0x27
    822
    823/* mmu_unmap_perm_addr()
    824 * TRAP:	HV_FAST_TRAP
    825 * FUNCTION:	HV_FAST_MMU_UNMAP_PERM_ADDR
    826 * ARG0:	virtual address
    827 * ARG1:	reserved, must be zero
    828 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
    829 * RET0:	status
    830 * ERRORS:	EINVAL			Invalid virtual address or flags value
    831 *		ENOMAP			Specified mapping was not found
    832 *
    833 * Demaps any permanent page mapping (established via
    834 * mmu_map_perm_addr()) at the given virtual address for context 0 on
    835 * the current virtual CPU.  Any virtual tagged caches are guaranteed
    836 * to be kept consistent.
    837 */
    838#define HV_FAST_MMU_UNMAP_PERM_ADDR	0x28
    839
    840/* mmu_tsb_ctx0_info()
    841 * TRAP:	HV_FAST_TRAP
    842 * FUNCTION:	HV_FAST_MMU_TSB_CTX0_INFO
    843 * ARG0:	max TSBs
    844 * ARG1:	buffer pointer
    845 * RET0:	status
    846 * RET1:	number of TSBs
    847 * ERRORS:	EINVAL			Supplied buffer is too small
    848 *		EBADALIGN		The buffer pointer is badly aligned
    849 *		ENORADDR		Invalid real address for buffer pointer
    850 *
     851 * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
    852 * into the provided buffer.  The size of the buffer is given in ARG1
    853 * in terms of the number of TSB description entries.
    854 *
    855 * Upon return, RET1 always contains the number of TSB descriptions
    856 * previously configured.  If zero TSBs were configured, EOK is
    857 * returned with RET1 containing 0.
    858 */
    859#define HV_FAST_MMU_TSB_CTX0_INFO	0x29
    860
    861/* mmu_tsb_ctxnon0_info()
    862 * TRAP:	HV_FAST_TRAP
    863 * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0_INFO
    864 * ARG0:	max TSBs
    865 * ARG1:	buffer pointer
    866 * RET0:	status
    867 * RET1:	number of TSBs
    868 * ERRORS:	EINVAL			Supplied buffer is too small
    869 *		EBADALIGN		The buffer pointer is badly aligned
    870 *		ENORADDR		Invalid real address for buffer pointer
    871 *
     872 * Return the TSB configuration as previously defined by
    873 * mmu_tsb_ctxnon0() into the provided buffer.  The size of the buffer
    874 * is given in ARG1 in terms of the number of TSB description entries.
    875 *
    876 * Upon return, RET1 always contains the number of TSB descriptions
    877 * previously configured.  If zero TSBs were configured, EOK is
    878 * returned with RET1 containing 0.
    879 */
    880#define HV_FAST_MMU_TSB_CTXNON0_INFO	0x2a
    881
    882/* mmu_fault_area_info()
    883 * TRAP:	HV_FAST_TRAP
    884 * FUNCTION:	HV_FAST_MMU_FAULT_AREA_INFO
    885 * RET0:	status
    886 * RET1:	fault area real address
    887 * ERRORS:	No errors defined.
    888 *
    889 * Return the currently defined MMU fault status area for the current
    890 * CPU.  The real address of the fault status area is returned in
    891 * RET1, or 0 is returned in RET1 if no fault status area is defined.
    892 *
    893 * Note: mmu_fault_area_conf() may be called with the return value (RET1)
    894 *       from this service if there is a need to save and restore the fault
    895 *	 area for a cpu.
    896 */
    897#define HV_FAST_MMU_FAULT_AREA_INFO	0x2b
    898
    899/* Cache and Memory services. */
    900
    901/* mem_scrub()
    902 * TRAP:	HV_FAST_TRAP
    903 * FUNCTION:	HV_FAST_MEM_SCRUB
    904 * ARG0:	real address
    905 * ARG1:	length
    906 * RET0:	status
    907 * RET1:	length scrubbed
    908 * ERRORS:	ENORADDR	Invalid real address
    909 *		EBADALIGN	Start address or length are not correctly
    910 *				aligned
    911 *		EINVAL		Length is zero
    912 *
    913 * Zero the memory contents in the range real address to real address
    914 * plus length minus 1.  Also, valid ECC will be generated for that
    915 * memory address range.  Scrubbing is started at the given real
    916 * address, but may not scrub the entire given length.  The actual
    917 * length scrubbed will be returned in RET1.
    918 *
    919 * The real address and length must be aligned on an 8K boundary, or
    920 * contain the start address and length from a sun4v error report.
    921 *
    922 * Note: There are two uses for this function.  The first use is to block clear
     923 *       and initialize memory and the second is to scrub an uncorrectable
    924 *       error reported via a resumable or non-resumable trap.  The second
    925 *       use requires the arguments to be equal to the real address and length
    926 *       provided in a sun4v memory error report.
    927 */
    928#define HV_FAST_MEM_SCRUB		0x31
    929
    930/* mem_sync()
    931 * TRAP:	HV_FAST_TRAP
    932 * FUNCTION:	HV_FAST_MEM_SYNC
    933 * ARG0:	real address
    934 * ARG1:	length
    935 * RET0:	status
    936 * RET1:	length synced
    937 * ERRORS:	ENORADDR	Invalid real address
    938 *		EBADALIGN	Start address or length are not correctly
    939 *				aligned
    940 *		EINVAL		Length is zero
    941 *
    942 * Force the next access within the real address to real address plus
     943 * length minus 1 to be fetched from main system memory.  Less than
    944 * the given length may be synced, the actual amount synced is
    945 * returned in RET1.  The real address and length must be aligned on
    946 * an 8K boundary.
    947 */
    948#define HV_FAST_MEM_SYNC		0x32
    949
    950/* Coprocessor services
    951 *
    952 * M7 and later processors provide an on-chip coprocessor which
    953 * accelerates database operations, and is known internally as
    954 * DAX.
    955 */
    956
    957/* ccb_submit()
    958 * TRAP:	HV_FAST_TRAP
    959 * FUNCTION:	HV_CCB_SUBMIT
    960 * ARG0:	address of CCB array
    961 * ARG1:	size (in bytes) of CCB array being submitted
    962 * ARG2:	flags
    963 * ARG3:	reserved
    964 * RET0:	status (success or error code)
    965 * RET1:	size (in bytes) of CCB array that was accepted (might be less
    966 *		than arg1)
    967 * RET2:	status data
    968 *		if status == ENOMAP or ENOACCESS, identifies the VA in question
     969 *		if status == EUNAVAILABLE, unavailable code
    970 * RET3:	reserved
    971 *
    972 * ERRORS:	EOK		successful submission (check size)
    973 *		EWOULDBLOCK	could not finish submissions, try again
    974 *		EBADALIGN	array not 64B aligned or size not 64B multiple
    975 *		ENORADDR	invalid RA for array or in CCB
    976 *		ENOMAP		could not translate address (see status data)
    977 *		EINVAL		invalid ccb or arguments
    978 *		ETOOMANY	too many ccbs with all-or-nothing flag
    979 *		ENOACCESS	guest has no access to submit ccbs or address
    980 *				in CCB does not have correct permissions (check
    981 *				status data)
    982 *		EUNAVAILABLE	ccb operation could not be performed at this
    983 *				time (check status data)
    984 *				Status data codes:
    985 *					0 - exact CCB could not be executed
    986 *					1 - CCB opcode cannot be executed
    987 *					2 - CCB version cannot be executed
    988 *					3 - vcpu cannot execute CCBs
    989 *					4 - no CCBs can be executed
    990 */
    991
    992#define HV_CCB_SUBMIT               0x34
    993#ifndef __ASSEMBLY__
    994unsigned long sun4v_ccb_submit(unsigned long ccb_buf,
    995			       unsigned long len,
    996			       unsigned long flags,
    997			       unsigned long reserved,
    998			       void *submitted_len,
    999			       void *status_data);
   1000#endif
   1001
   1002/* flags (ARG2) */
   1003#define HV_CCB_QUERY_CMD		BIT(1)
   1004#define HV_CCB_ARG0_TYPE_REAL		0UL
   1005#define HV_CCB_ARG0_TYPE_PRIMARY	BIT(4)
   1006#define HV_CCB_ARG0_TYPE_SECONDARY	BIT(5)
   1007#define HV_CCB_ARG0_TYPE_NUCLEUS	GENMASK(5, 4)
   1008#define HV_CCB_ARG0_PRIVILEGED		BIT(6)
   1009#define HV_CCB_ALL_OR_NOTHING		BIT(7)
   1010#define HV_CCB_QUEUE_INFO		BIT(8)
   1011#define HV_CCB_VA_REJECT		0UL
   1012#define HV_CCB_VA_SECONDARY		BIT(13)
   1013#define HV_CCB_VA_NUCLEUS		GENMASK(13, 12)
   1014#define HV_CCB_VA_PRIVILEGED		BIT(14)
   1015#define HV_CCB_VA_READ_ADI_DISABLE	BIT(15)	/* DAX2 only */
   1016
   1017/* ccb_info()
   1018 * TRAP:	HV_FAST_TRAP
   1019 * FUNCTION:	HV_CCB_INFO
   1020 * ARG0:	real address of CCB completion area
   1021 * RET0:	status (success or error code)
   1022 * RET1:	info array
   1023 *			- RET1[0]: CCB state
   1024 *			- RET1[1]: dax unit
   1025 *			- RET1[2]: queue number
   1026 *			- RET1[3]: queue position
   1027 *
   1028 * ERRORS:	EOK		operation successful
   1029 *		EBADALIGN	address not 64B aligned
   1030 *		ENORADDR	RA in address not valid
   1031 *		EINVAL		CA not valid
   1032 *		EWOULDBLOCK	info not available for this CCB currently, try
   1033 *				again
   1034 *		ENOACCESS	guest cannot use dax
   1035 */
   1036
   1037#define HV_CCB_INFO                 0x35
   1038#ifndef __ASSEMBLY__
   1039unsigned long sun4v_ccb_info(unsigned long ca,
   1040			     void *info_arr);
   1041#endif
   1042
   1043/* info array byte offsets (RET1) */
   1044#define CCB_INFO_OFFSET_CCB_STATE	0
   1045#define CCB_INFO_OFFSET_DAX_UNIT	2
   1046#define CCB_INFO_OFFSET_QUEUE_NUM	4
   1047#define CCB_INFO_OFFSET_QUEUE_POS	6
   1048
   1049/* CCB state (RET1[0]) */
   1050#define HV_CCB_STATE_COMPLETED      0
   1051#define HV_CCB_STATE_ENQUEUED       1
   1052#define HV_CCB_STATE_INPROGRESS     2
   1053#define HV_CCB_STATE_NOTFOUND       3
   1054
   1055/* ccb_kill()
   1056 * TRAP:	HV_FAST_TRAP
   1057 * FUNCTION:	HV_CCB_KILL
   1058 * ARG0:	real address of CCB completion area
   1059 * RET0:	status (success or error code)
   1060 * RET1:	CCB kill status
   1061 *
   1062 * ERRORS:	EOK		operation successful
   1063 *		EBADALIGN	address not 64B aligned
   1064 *		ENORADDR	RA in address not valid
   1065 *		EINVAL		CA not valid
   1066 *		EWOULDBLOCK	kill not available for this CCB currently, try
   1067 *				again
   1068 *		ENOACCESS	guest cannot use dax
   1069 */
   1070
   1071#define HV_CCB_KILL                 0x36
   1072#ifndef __ASSEMBLY__
   1073unsigned long sun4v_ccb_kill(unsigned long ca,
   1074			     void *kill_status);
   1075#endif
   1076
   1077/* CCB kill status (RET1) */
   1078#define HV_CCB_KILL_COMPLETED       0
   1079#define HV_CCB_KILL_DEQUEUED        1
   1080#define HV_CCB_KILL_KILLED          2
   1081#define HV_CCB_KILL_NOTFOUND        3
   1082
   1083/* Time of day services.
   1084 *
   1085 * The hypervisor maintains the time of day on a per-domain basis.
   1086 * Changing the time of day in one domain does not affect the time of
   1087 * day on any other domain.
   1088 *
   1089 * Time is described by a single unsigned 64-bit word which is the
   1090 * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
   1091 * 1970).
   1092 */
   1093
   1094/* tod_get()
   1095 * TRAP:	HV_FAST_TRAP
   1096 * FUNCTION:	HV_FAST_TOD_GET
   1097 * RET0:	status
   1098 * RET1:	TOD
   1099 * ERRORS:	EWOULDBLOCK	TOD resource is temporarily unavailable
   1100 *		ENOTSUPPORTED	If TOD not supported on this platform
   1101 *
   1102 * Return the current time of day.  May block if TOD access is
   1103 * temporarily not possible.
   1104 */
   1105#define HV_FAST_TOD_GET			0x50
   1106
   1107#ifndef __ASSEMBLY__
   1108unsigned long sun4v_tod_get(unsigned long *time);
   1109#endif
   1110
   1111/* tod_set()
   1112 * TRAP:	HV_FAST_TRAP
   1113 * FUNCTION:	HV_FAST_TOD_SET
   1114 * ARG0:	TOD
   1115 * RET0:	status
   1116 * ERRORS:	EWOULDBLOCK	TOD resource is temporarily unavailable
   1117 *		ENOTSUPPORTED	If TOD not supported on this platform
   1118 *
   1119 * The current time of day is set to the value specified in ARG0.  May
   1120 * block if TOD access is temporarily not possible.
   1121 */
   1122#define HV_FAST_TOD_SET			0x51
   1123
   1124#ifndef __ASSEMBLY__
   1125unsigned long sun4v_tod_set(unsigned long time);
   1126#endif
   1127
   1128/* Console services */
   1129
   1130/* con_getchar()
   1131 * TRAP:	HV_FAST_TRAP
   1132 * FUNCTION:	HV_FAST_CONS_GETCHAR
   1133 * RET0:	status
   1134 * RET1:	character
   1135 * ERRORS:	EWOULDBLOCK	No character available.
   1136 *
   1137 * Returns a character from the console device.  If no character is
   1138 * available then an EWOULDBLOCK error is returned.  If a character is
   1139 * available, then the returned status is EOK and the character value
   1140 * is in RET1.
   1141 *
   1142 * A virtual BREAK is represented by the 64-bit value -1.
   1143 *
   1144 * A virtual HUP signal is represented by the 64-bit value -2.
   1145 */
   1146#define HV_FAST_CONS_GETCHAR		0x60
   1147
   1148/* con_putchar()
   1149 * TRAP:	HV_FAST_TRAP
   1150 * FUNCTION:	HV_FAST_CONS_PUTCHAR
   1151 * ARG0:	character
   1152 * RET0:	status
   1153 * ERRORS:	EINVAL		Illegal character
   1154 *		EWOULDBLOCK	Output buffer currently full, would block
   1155 *
   1156 * Send a character to the console device.  Only character values
   1157 * between 0 and 255 may be used.  Values outside this range are
   1158 * invalid except for the 64-bit value -1 which is used to send a
   1159 * virtual BREAK.
   1160 */
   1161#define HV_FAST_CONS_PUTCHAR		0x61
   1162
   1163/* con_read()
   1164 * TRAP:	HV_FAST_TRAP
   1165 * FUNCTION:	HV_FAST_CONS_READ
   1166 * ARG0:	buffer real address
   1167 * ARG1:	buffer size in bytes
   1168 * RET0:	status
   1169 * RET1:	bytes read or BREAK or HUP
   1170 * ERRORS:	EWOULDBLOCK	No character available.
   1171 *
   1172 * Reads characters into a buffer from the console device.  If no
   1173 * character is available then an EWOULDBLOCK error is returned.
   1174 * If a character is available, then the returned status is EOK
   1175 * and the number of bytes read into the given buffer is provided
   1176 * in RET1.
   1177 *
   1178 * A virtual BREAK is represented by the 64-bit RET1 value -1.
   1179 *
   1180 * A virtual HUP signal is represented by the 64-bit RET1 value -2.
   1181 *
   1182 * If BREAK or HUP are indicated, no bytes were read into buffer.
   1183 */
   1184#define HV_FAST_CONS_READ		0x62
   1185
   1186/* con_write()
   1187 * TRAP:	HV_FAST_TRAP
   1188 * FUNCTION:	HV_FAST_CONS_WRITE
   1189 * ARG0:	buffer real address
   1190 * ARG1:	buffer size in bytes
   1191 * RET0:	status
   1192 * RET1:	bytes written
   1193 * ERRORS:	EWOULDBLOCK	Output buffer currently full, would block
   1194 *
    1195 * Send the characters in the buffer to the console device.  Breaks must be
   1196 * sent using con_putchar().
   1197 */
   1198#define HV_FAST_CONS_WRITE		0x63
   1199
   1200#ifndef __ASSEMBLY__
   1201long sun4v_con_getchar(long *status);
   1202long sun4v_con_putchar(long c);
   1203long sun4v_con_read(unsigned long buffer,
   1204		    unsigned long size,
   1205		    unsigned long *bytes_read);
   1206unsigned long sun4v_con_write(unsigned long buffer,
   1207			      unsigned long size,
   1208			      unsigned long *bytes_written);
   1209#endif
   1210
   1211/* mach_set_soft_state()
   1212 * TRAP:	HV_FAST_TRAP
   1213 * FUNCTION:	HV_FAST_MACH_SET_SOFT_STATE
   1214 * ARG0:	software state
   1215 * ARG1:	software state description pointer
   1216 * RET0:	status
   1217 * ERRORS:	EINVAL		software state not valid or software state
   1218 *				description is not NULL terminated
   1219 *		ENORADDR	software state description pointer is not a
   1220 *				valid real address
   1221 *		EBADALIGNED	software state description is not correctly
   1222 *				aligned
   1223 *
    1224 * This allows the guest to report its soft state to the hypervisor.  There
    1225 * are two primary components to this state.  The first part states whether
    1226 * the guest software is running or not.  The second contains optional
   1227 * details specific to the software.
   1228 *
   1229 * The software state argument is defined below in HV_SOFT_STATE_*, and
   1230 * indicates whether the guest is operating normally or in a transitional
   1231 * state.
   1232 *
   1233 * The software state description argument is a real address of a data buffer
   1234 * of size 32-bytes aligned on a 32-byte boundary.  It is treated as a NULL
   1235 * terminated 7-bit ASCII string of up to 31 characters not including the
   1236 * NULL termination.
   1237 */
   1238#define HV_FAST_MACH_SET_SOFT_STATE	0x70
   1239#define  HV_SOFT_STATE_NORMAL		 0x01
   1240#define  HV_SOFT_STATE_TRANSITION	 0x02
   1241
   1242#ifndef __ASSEMBLY__
   1243unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
   1244				        unsigned long msg_string_ra);
   1245#endif
   1246
   1247/* mach_get_soft_state()
   1248 * TRAP:	HV_FAST_TRAP
   1249 * FUNCTION:	HV_FAST_MACH_GET_SOFT_STATE
   1250 * ARG0:	software state description pointer
   1251 * RET0:	status
   1252 * RET1:	software state
   1253 * ERRORS:	ENORADDR	software state description pointer is not a
   1254 *				valid real address
   1255 *		EBADALIGNED	software state description is not correctly
   1256 *				aligned
   1257 *
   1258 * Retrieve the current value of the guest's software state.  The rules
   1259 * for the software state pointer are the same as for mach_set_soft_state()
   1260 * above.
   1261 */
   1262#define HV_FAST_MACH_GET_SOFT_STATE	0x71
   1263
   1264/* svc_send()
   1265 * TRAP:	HV_FAST_TRAP
   1266 * FUNCTION:	HV_FAST_SVC_SEND
   1267 * ARG0:	service ID
   1268 * ARG1:	buffer real address
   1269 * ARG2:	buffer size
   1270 * RET0:	STATUS
   1271 * RET1:	sent_bytes
   1272 *
   1273 * Be careful, all output registers are clobbered by this operation,
   1274 * so for example it is not possible to save away a value in %o4
   1275 * across the trap.
   1276 */
   1277#define HV_FAST_SVC_SEND		0x80
   1278
   1279/* svc_recv()
   1280 * TRAP:	HV_FAST_TRAP
   1281 * FUNCTION:	HV_FAST_SVC_RECV
   1282 * ARG0:	service ID
   1283 * ARG1:	buffer real address
   1284 * ARG2:	buffer size
   1285 * RET0:	STATUS
   1286 * RET1:	recv_bytes
   1287 *
   1288 * Be careful, all output registers are clobbered by this operation,
   1289 * so for example it is not possible to save away a value in %o4
   1290 * across the trap.
   1291 */
   1292#define HV_FAST_SVC_RECV		0x81
   1293
   1294/* svc_getstatus()
   1295 * TRAP:	HV_FAST_TRAP
   1296 * FUNCTION:	HV_FAST_SVC_GETSTATUS
   1297 * ARG0:	service ID
   1298 * RET0:	STATUS
   1299 * RET1:	status bits
   1300 */
   1301#define HV_FAST_SVC_GETSTATUS		0x82
   1302
   1303/* svc_setstatus()
   1304 * TRAP:	HV_FAST_TRAP
   1305 * FUNCTION:	HV_FAST_SVC_SETSTATUS
   1306 * ARG0:	service ID
   1307 * ARG1:	bits to set
   1308 * RET0:	STATUS
   1309 */
   1310#define HV_FAST_SVC_SETSTATUS		0x83
   1311
   1312/* svc_clrstatus()
   1313 * TRAP:	HV_FAST_TRAP
   1314 * FUNCTION:	HV_FAST_SVC_CLRSTATUS
   1315 * ARG0:	service ID
   1316 * ARG1:	bits to clear
   1317 * RET0:	STATUS
   1318 */
   1319#define HV_FAST_SVC_CLRSTATUS		0x84
   1320
   1321#ifndef __ASSEMBLY__
   1322unsigned long sun4v_svc_send(unsigned long svc_id,
   1323			     unsigned long buffer,
   1324			     unsigned long buffer_size,
   1325			     unsigned long *sent_bytes);
   1326unsigned long sun4v_svc_recv(unsigned long svc_id,
   1327			     unsigned long buffer,
   1328			     unsigned long buffer_size,
   1329			     unsigned long *recv_bytes);
   1330unsigned long sun4v_svc_getstatus(unsigned long svc_id,
   1331				  unsigned long *status_bits);
   1332unsigned long sun4v_svc_setstatus(unsigned long svc_id,
   1333				  unsigned long status_bits);
   1334unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
   1335				  unsigned long status_bits);
   1336#endif
   1337
   1338/* Trap trace services.
   1339 *
   1340 * The hypervisor provides a trap tracing capability for privileged
   1341 * code running on each virtual CPU.  Privileged code provides a
   1342 * round-robin trap trace queue within which the hypervisor writes
    1343 * 64-byte entries detailing hyperprivileged traps taken on behalf of
   1344 * privileged code.  This is provided as a debugging capability for
   1345 * privileged code.
   1346 *
   1347 * The trap trace control structure is 64-bytes long and placed at the
   1348 * start (offset 0) of the trap trace buffer, and is described as
   1349 * follows:
   1350 */
   1351#ifndef __ASSEMBLY__
    1352struct hv_trap_trace_control {
    1353	unsigned long		head_offset;	/* most recently completed entry (see comment below) */
    1354	unsigned long		tail_offset;	/* next entry to be written by the hypervisor */
    1355	unsigned long		__reserved[0x30 / sizeof(unsigned long)];	/* pad structure to 64 bytes */
    1356};
   1357#endif
   1358#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET	0x00
   1359#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET	0x08
   1360
   1361/* The head offset is the offset of the most recently completed entry
   1362 * in the trap-trace buffer.  The tail offset is the offset of the
   1363 * next entry to be written.  The control structure is owned and
   1364 * modified by the hypervisor.  A guest may not modify the control
   1365 * structure contents.  Attempts to do so will result in undefined
   1366 * behavior for the guest.
   1367 *
   1368 * Each trap trace buffer entry is laid out as follows:
   1369 */
   1370#ifndef __ASSEMBLY__
    1371struct hv_trap_trace_entry {
    1372	unsigned char	type;		/* HV_TRAP_TYPE_{UNDEF,HV,GUEST}	*/
    1373	unsigned char	hpstate;	/* Hyper-privileged state	*/
    1374	unsigned char	tl;		/* Trap level			*/
    1375	unsigned char	gl;		/* Global register level	*/
    1376	unsigned short	tt;		/* Trap type			*/
    1377	unsigned short	tag;		/* Extended trap identifier	*/
    1378	unsigned long	tstate;		/* Trap state			*/
    1379	unsigned long	tick;		/* Tick				*/
    1380	unsigned long	tpc;		/* Trap PC			*/
    1381	unsigned long	f1;		/* Entry specific		*/
    1382	unsigned long	f2;		/* Entry specific		*/
    1383	unsigned long	f3;		/* Entry specific		*/
    1384	unsigned long	f4;		/* Entry specific		*/
    1385};
   1386#endif
   1387#define HV_TRAP_TRACE_ENTRY_TYPE	0x00
   1388#define HV_TRAP_TRACE_ENTRY_HPSTATE	0x01
   1389#define HV_TRAP_TRACE_ENTRY_TL		0x02
   1390#define HV_TRAP_TRACE_ENTRY_GL		0x03
   1391#define HV_TRAP_TRACE_ENTRY_TT		0x04
   1392#define HV_TRAP_TRACE_ENTRY_TAG		0x06
   1393#define HV_TRAP_TRACE_ENTRY_TSTATE	0x08
   1394#define HV_TRAP_TRACE_ENTRY_TICK	0x10
   1395#define HV_TRAP_TRACE_ENTRY_TPC		0x18
   1396#define HV_TRAP_TRACE_ENTRY_F1		0x20
   1397#define HV_TRAP_TRACE_ENTRY_F2		0x28
   1398#define HV_TRAP_TRACE_ENTRY_F3		0x30
   1399#define HV_TRAP_TRACE_ENTRY_F4		0x38
   1400
   1401/* The type field is encoded as follows.  */
   1402#define HV_TRAP_TYPE_UNDEF		0x00 /* Entry content undefined     */
   1403#define HV_TRAP_TYPE_HV			0x01 /* Hypervisor trap entry       */
   1404#define HV_TRAP_TYPE_GUEST		0xff /* Added via ttrace_addentry() */
   1405
   1406/* ttrace_buf_conf()
   1407 * TRAP:	HV_FAST_TRAP
   1408 * FUNCTION:	HV_FAST_TTRACE_BUF_CONF
   1409 * ARG0:	real address
   1410 * ARG1:	number of entries
   1411 * RET0:	status
   1412 * RET1:	number of entries
   1413 * ERRORS:	ENORADDR	Invalid real address
   1414 *		EINVAL		Size is too small
   1415 *		EBADALIGN	Real address not aligned on 64-byte boundary
   1416 *
   1417 * Requests hypervisor trap tracing and declares a virtual CPU's trap
   1418 * trace buffer to the hypervisor.  The real address supplies the real
   1419 * base address of the trap trace queue and must be 64-byte aligned.
   1420 * Specifying a value of 0 for the number of entries disables trap
   1421 * tracing for the calling virtual CPU.  The buffer allocated must be
   1422 * sized for a power of two number of 64-byte trap trace entries plus
   1423 * an initial 64-byte control structure.
   1424 *
   1425 * This may be invoked any number of times so that a virtual CPU may
   1426 * relocate a trap trace buffer or create "snapshots" of information.
   1427 *
   1428 * If the real address is illegal or badly aligned, then trap tracing
   1429 * is disabled and an error is returned.
   1430 *
   1431 * Upon failure with EINVAL, this service call returns in RET1 the
   1432 * minimum number of buffer entries required.  Upon other failures
   1433 * RET1 is undefined.
   1434 */
   1435#define HV_FAST_TTRACE_BUF_CONF		0x90
   1436
   1437/* ttrace_buf_info()
   1438 * TRAP:	HV_FAST_TRAP
   1439 * FUNCTION:	HV_FAST_TTRACE_BUF_INFO
   1440 * RET0:	status
   1441 * RET1:	real address
   1442 * RET2:	size
   1443 * ERRORS:	None defined.
   1444 *
   1445 * Returns the size and location of the previously declared trap-trace
   1446 * buffer.  In the event that no buffer was previously defined, or the
   1447 * buffer is disabled, this call will return a size of zero bytes.
   1448 */
   1449#define HV_FAST_TTRACE_BUF_INFO		0x91
   1450
   1451/* ttrace_enable()
   1452 * TRAP:	HV_FAST_TRAP
   1453 * FUNCTION:	HV_FAST_TTRACE_ENABLE
   1454 * ARG0:	enable
   1455 * RET0:	status
   1456 * RET1:	previous enable state
   1457 * ERRORS:	EINVAL		No trap trace buffer currently defined
   1458 *
   1459 * Enable or disable trap tracing, and return the previous enabled
   1460 * state in RET1.  Future systems may define various flags for the
   1461 * enable argument (ARG0), for the moment a guest should pass
   1462 * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
   1463 * tracing - which will ensure future compatibility.
   1464 */
   1465#define HV_FAST_TTRACE_ENABLE		0x92
   1466
   1467/* ttrace_freeze()
   1468 * TRAP:	HV_FAST_TRAP
   1469 * FUNCTION:	HV_FAST_TTRACE_FREEZE
   1470 * ARG0:	freeze
   1471 * RET0:	status
   1472 * RET1:	previous freeze state
   1473 * ERRORS:	EINVAL		No trap trace buffer currently defined
   1474 *
   1475 * Freeze or unfreeze trap tracing, returning the previous freeze
   1476 * state in RET1.  A guest should pass a non-zero value to freeze and
   1477 * a zero value to unfreeze all tracing.  The returned previous state
   1478 * is 0 for not frozen and 1 for frozen.
   1479 */
   1480#define HV_FAST_TTRACE_FREEZE		0x93
   1481
   1482/* ttrace_addentry()
   1483 * TRAP:	HV_TTRACE_ADDENTRY_TRAP
   1484 * ARG0:	tag (16-bits)
   1485 * ARG1:	data word 0
   1486 * ARG2:	data word 1
   1487 * ARG3:	data word 2
   1488 * ARG4:	data word 3
   1489 * RET0:	status
   1490 * ERRORS:	EINVAL		No trap trace buffer currently defined
   1491 *
   1492 * Add an entry to the trap trace buffer.  Upon return only ARG0/RET0
   1493 * is modified - none of the other registers holding arguments are
   1494 * volatile across this hypervisor service.
   1495 */
   1496
   1497/* Core dump services.
   1498 *
    1499 * Since the hypervisor virtualizes and thus obscures a lot of the
   1500 * physical machine layout and state, traditional OS crash dumps can
   1501 * be difficult to diagnose especially when the problem is a
   1502 * configuration error of some sort.
   1503 *
   1504 * The dump services provide an opaque buffer into which the
    1505 * hypervisor can place its internal state in order to assist in
   1506 * debugging such situations.  The contents are opaque and extremely
   1507 * platform and hypervisor implementation specific.  The guest, during
   1508 * a core dump, requests that the hypervisor update any information in
   1509 * the dump buffer in preparation to being dumped as part of the
   1510 * domain's memory image.
   1511 */
   1512
   1513/* dump_buf_update()
   1514 * TRAP:	HV_FAST_TRAP
   1515 * FUNCTION:	HV_FAST_DUMP_BUF_UPDATE
   1516 * ARG0:	real address
   1517 * ARG1:	size
   1518 * RET0:	status
   1519 * RET1:	required size of dump buffer
   1520 * ERRORS:	ENORADDR	Invalid real address
   1521 *		EBADALIGN	Real address is not aligned on a 64-byte
   1522 *				boundary
   1523 *		EINVAL		Size is non-zero but less than minimum size
   1524 *				required
   1525 *		ENOTSUPPORTED	Operation not supported on current logical
   1526 *				domain
   1527 *
   1528 * Declare a domain dump buffer to the hypervisor.  The real address
   1529 * provided for the domain dump buffer must be 64-byte aligned.  The
   1530 * size specifies the size of the dump buffer and may be larger than
   1531 * the minimum size specified in the machine description.  The
   1532 * hypervisor will fill the dump buffer with opaque data.
   1533 *
   1534 * Note: A guest may elect to include dump buffer contents as part of a crash
   1535 *       dump to assist with debugging.  This function may be called any number
   1536 *       of times so that a guest may relocate a dump buffer, or create
   1537 *       "snapshots" of any dump-buffer information.  Each call to
   1538 *       dump_buf_update() atomically declares the new dump buffer to the
   1539 *       hypervisor.
   1540 *
   1541 * A specified size of 0 unconfigures the dump buffer.  If the real
   1542 * address is illegal or badly aligned, then any currently active dump
   1543 * buffer is disabled and an error is returned.
   1544 *
   1545 * In the event that the call fails with EINVAL, RET1 contains the
    1546 * minimum size required by the hypervisor for a valid dump buffer.
   1547 */
   1548#define HV_FAST_DUMP_BUF_UPDATE		0x94
   1549
   1550/* dump_buf_info()
   1551 * TRAP:	HV_FAST_TRAP
   1552 * FUNCTION:	HV_FAST_DUMP_BUF_INFO
   1553 * RET0:	status
   1554 * RET1:	real address of current dump buffer
   1555 * RET2:	size of current dump buffer
   1556 * ERRORS:	No errors defined.
   1557 *
    1558 * Return the currently configured dump buffer description.  A
   1559 * returned size of 0 bytes indicates an undefined dump buffer.  In
   1560 * this case the return address in RET1 is undefined.
   1561 */
   1562#define HV_FAST_DUMP_BUF_INFO		0x95
   1563
   1564/* Device interrupt services.
   1565 *
   1566 * Device interrupts are allocated to system bus bridges by the hypervisor,
   1567 * and described to OBP in the machine description.  OBP then describes
   1568 * these interrupts to the OS via properties in the device tree.
   1569 *
   1570 * Terminology:
   1571 *
   1572 *	cpuid		Unique opaque value which represents a target cpu.
   1573 *
   1574 *	devhandle	Device handle.  It uniquely identifies a device, and
    1575 *			consists of the lower 28-bits of the hi-cell of the
   1576 *			first entry of the device's "reg" property in the
   1577 *			OBP device tree.
   1578 *
   1579 *	devino		Device interrupt number.  Specifies the relative
   1580 *			interrupt number within the device.  The unique
   1581 *			combination of devhandle and devino are used to
   1582 *			identify a specific device interrupt.
   1583 *
   1584 *			Note: The devino value is the same as the values in the
   1585 *			      "interrupts" property or "interrupt-map" property
   1586 *			      in the OBP device tree for that device.
   1587 *
    1588 *	sysino		System interrupt number.  A 64-bit unsigned integer
   1589 *			representing a unique interrupt within a virtual
   1590 *			machine.
   1591 *
   1592 *	intr_state	A flag representing the interrupt state for a given
   1593 *			sysino.  The state values are defined below.
   1594 *
   1595 *	intr_enabled	A flag representing the 'enabled' state for a given
   1596 *			sysino.  The enable values are defined below.
   1597 */
   1598
   1599#define HV_INTR_STATE_IDLE		0 /* Nothing pending */
   1600#define HV_INTR_STATE_RECEIVED		1 /* Interrupt received by hardware */
   1601#define HV_INTR_STATE_DELIVERED		2 /* Interrupt delivered to queue */
   1602
   1603#define HV_INTR_DISABLED		0 /* sysino not enabled */
   1604#define HV_INTR_ENABLED			1 /* sysino enabled */
   1605
   1606/* intr_devino_to_sysino()
   1607 * TRAP:	HV_FAST_TRAP
   1608 * FUNCTION:	HV_FAST_INTR_DEVINO2SYSINO
   1609 * ARG0:	devhandle
   1610 * ARG1:	devino
   1611 * RET0:	status
   1612 * RET1:	sysino
   1613 * ERRORS:	EINVAL		Invalid devhandle/devino
   1614 *
   1615 * Converts a device specific interrupt number of the given
   1616 * devhandle/devino into a system specific ino (sysino).
   1617 */
   1618#define HV_FAST_INTR_DEVINO2SYSINO	0xa0
   1619
   1620#ifndef __ASSEMBLY__
   1621unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
   1622				     unsigned long devino);
   1623#endif
   1624
   1625/* intr_getenabled()
   1626 * TRAP:	HV_FAST_TRAP
   1627 * FUNCTION:	HV_FAST_INTR_GETENABLED
   1628 * ARG0:	sysino
   1629 * RET0:	status
   1630 * RET1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
   1631 * ERRORS:	EINVAL		Invalid sysino
   1632 *
   1633 * Returns interrupt enabled state in RET1 for the interrupt defined
   1634 * by the given sysino.
   1635 */
   1636#define HV_FAST_INTR_GETENABLED		0xa1
   1637
   1638#ifndef __ASSEMBLY__
   1639unsigned long sun4v_intr_getenabled(unsigned long sysino);
   1640#endif
   1641
   1642/* intr_setenabled()
   1643 * TRAP:	HV_FAST_TRAP
   1644 * FUNCTION:	HV_FAST_INTR_SETENABLED
   1645 * ARG0:	sysino
   1646 * ARG1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
   1647 * RET0:	status
   1648 * ERRORS:	EINVAL		Invalid sysino or intr_enabled value
   1649 *
   1650 * Set the 'enabled' state of the interrupt sysino.
   1651 */
   1652#define HV_FAST_INTR_SETENABLED		0xa2
   1653
   1654#ifndef __ASSEMBLY__
   1655unsigned long sun4v_intr_setenabled(unsigned long sysino,
   1656				    unsigned long intr_enabled);
   1657#endif
   1658
   1659/* intr_getstate()
   1660 * TRAP:	HV_FAST_TRAP
   1661 * FUNCTION:	HV_FAST_INTR_GETSTATE
   1662 * ARG0:	sysino
   1663 * RET0:	status
   1664 * RET1:	intr_state (HV_INTR_STATE_*)
   1665 * ERRORS:	EINVAL		Invalid sysino
   1666 *
   1667 * Returns current state of the interrupt defined by the given sysino.
   1668 */
   1669#define HV_FAST_INTR_GETSTATE		0xa3
   1670
   1671#ifndef __ASSEMBLY__
   1672unsigned long sun4v_intr_getstate(unsigned long sysino);
   1673#endif
   1674
   1675/* intr_setstate()
   1676 * TRAP:	HV_FAST_TRAP
   1677 * FUNCTION:	HV_FAST_INTR_SETSTATE
   1678 * ARG0:	sysino
   1679 * ARG1:	intr_state (HV_INTR_STATE_*)
   1680 * RET0:	status
   1681 * ERRORS:	EINVAL		Invalid sysino or intr_state value
   1682 *
   1683 * Sets the current state of the interrupt described by the given sysino
   1684 * value.
   1685 *
   1686 * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
   1687 *       interrupt for sysino.
   1688 */
   1689#define HV_FAST_INTR_SETSTATE		0xa4
   1690
   1691#ifndef __ASSEMBLY__
   1692unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
   1693#endif
   1694
   1695/* intr_gettarget()
   1696 * TRAP:	HV_FAST_TRAP
   1697 * FUNCTION:	HV_FAST_INTR_GETTARGET
   1698 * ARG0:	sysino
   1699 * RET0:	status
   1700 * RET1:	cpuid
   1701 * ERRORS:	EINVAL		Invalid sysino
   1702 *
   1703 * Returns CPU that is the current target of the interrupt defined by
   1704 * the given sysino.  The CPU value returned is undefined if the target
   1705 * has not been set via intr_settarget().
   1706 */
   1707#define HV_FAST_INTR_GETTARGET		0xa5
   1708
   1709#ifndef __ASSEMBLY__
   1710unsigned long sun4v_intr_gettarget(unsigned long sysino);
   1711#endif
   1712
   1713/* intr_settarget()
   1714 * TRAP:	HV_FAST_TRAP
   1715 * FUNCTION:	HV_FAST_INTR_SETTARGET
   1716 * ARG0:	sysino
   1717 * ARG1:	cpuid
   1718 * RET0:	status
   1719 * ERRORS:	EINVAL		Invalid sysino
   1720 *		ENOCPU		Invalid cpuid
   1721 *
   1722 * Set the target CPU for the interrupt defined by the given sysino.
   1723 */
   1724#define HV_FAST_INTR_SETTARGET		0xa6
   1725
   1726#ifndef __ASSEMBLY__
   1727unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
   1728#endif
   1729
   1730/* vintr_get_cookie()
   1731 * TRAP:	HV_FAST_TRAP
   1732 * FUNCTION:	HV_FAST_VINTR_GET_COOKIE
   1733 * ARG0:	device handle
   1734 * ARG1:	device ino
   1735 * RET0:	status
   1736 * RET1:	cookie
   1737 */
   1738#define HV_FAST_VINTR_GET_COOKIE	0xa7
   1739
   1740/* vintr_set_cookie()
   1741 * TRAP:	HV_FAST_TRAP
   1742 * FUNCTION:	HV_FAST_VINTR_SET_COOKIE
   1743 * ARG0:	device handle
   1744 * ARG1:	device ino
   1745 * ARG2:	cookie
   1746 * RET0:	status
   1747 */
   1748#define HV_FAST_VINTR_SET_COOKIE	0xa8
   1749
   1750/* vintr_get_valid()
   1751 * TRAP:	HV_FAST_TRAP
   1752 * FUNCTION:	HV_FAST_VINTR_GET_VALID
   1753 * ARG0:	device handle
   1754 * ARG1:	device ino
   1755 * RET0:	status
   1756 * RET1:	valid state
   1757 */
   1758#define HV_FAST_VINTR_GET_VALID		0xa9
   1759
   1760/* vintr_set_valid()
   1761 * TRAP:	HV_FAST_TRAP
   1762 * FUNCTION:	HV_FAST_VINTR_SET_VALID
   1763 * ARG0:	device handle
   1764 * ARG1:	device ino
   1765 * ARG2:	valid state
   1766 * RET0:	status
   1767 */
   1768#define HV_FAST_VINTR_SET_VALID		0xaa
   1769
   1770/* vintr_get_state()
   1771 * TRAP:	HV_FAST_TRAP
   1772 * FUNCTION:	HV_FAST_VINTR_GET_STATE
   1773 * ARG0:	device handle
   1774 * ARG1:	device ino
   1775 * RET0:	status
   1776 * RET1:	state
   1777 */
   1778#define HV_FAST_VINTR_GET_STATE		0xab
   1779
   1780/* vintr_set_state()
   1781 * TRAP:	HV_FAST_TRAP
   1782 * FUNCTION:	HV_FAST_VINTR_SET_STATE
   1783 * ARG0:	device handle
   1784 * ARG1:	device ino
   1785 * ARG2:	state
   1786 * RET0:	status
   1787 */
   1788#define HV_FAST_VINTR_SET_STATE		0xac
   1789
   1790/* vintr_get_target()
   1791 * TRAP:	HV_FAST_TRAP
   1792 * FUNCTION:	HV_FAST_VINTR_GET_TARGET
   1793 * ARG0:	device handle
   1794 * ARG1:	device ino
   1795 * RET0:	status
   1796 * RET1:	cpuid
   1797 */
   1798#define HV_FAST_VINTR_GET_TARGET	0xad
   1799
   1800/* vintr_set_target()
   1801 * TRAP:	HV_FAST_TRAP
   1802 * FUNCTION:	HV_FAST_VINTR_SET_TARGET
   1803 * ARG0:	device handle
   1804 * ARG1:	device ino
   1805 * ARG2:	cpuid
   1806 * RET0:	status
   1807 */
   1808#define HV_FAST_VINTR_SET_TARGET	0xae
   1809
   1810#ifndef __ASSEMBLY__
   1811unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
   1812				     unsigned long dev_ino,
   1813				     unsigned long *cookie);
   1814unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
   1815				     unsigned long dev_ino,
   1816				     unsigned long cookie);
   1817unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
   1818				    unsigned long dev_ino,
   1819				    unsigned long *valid);
   1820unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
   1821				    unsigned long dev_ino,
   1822				    unsigned long valid);
   1823unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
   1824				    unsigned long dev_ino,
   1825				    unsigned long *state);
   1826unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
   1827				    unsigned long dev_ino,
   1828				    unsigned long state);
   1829unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
   1830				     unsigned long dev_ino,
   1831				     unsigned long *cpuid);
   1832unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
   1833				     unsigned long dev_ino,
   1834				     unsigned long cpuid);
   1835#endif
   1836
   1837/* PCI IO services.
   1838 *
   1839 * See the terminology descriptions in the device interrupt services
   1840 * section above as those apply here too.  Here are terminology
   1841 * definitions specific to these PCI IO services:
   1842 *
    1843 *	tsbnum		TSB number.  Identifies which io-tsb is used.
   1844 *			For this version of the specification, tsbnum
   1845 *			must be zero.
   1846 *
   1847 *	tsbindex	TSB index.  Identifies which entry in the TSB
   1848 *			is used.  The first entry is zero.
   1849 *
   1850 *	tsbid		A 64-bit aligned data structure which contains
   1851 *			a tsbnum and a tsbindex.  Bits 63:32 contain the
   1852 *			tsbnum and bits 31:00 contain the tsbindex.
   1853 *
   1854 *			Use the HV_PCI_TSBID() macro to construct such
   1855 * 			values.
   1856 *
    1857 *	io_attributes	IO attributes for IOMMU mappings.  One or more
    1858 *			of the attribute bits are stored in a 64-bit
   1859 *			value.  The values are defined below.
   1860 *
   1861 *	r_addr		64-bit real address
   1862 *
   1863 *	pci_device	PCI device address.  A PCI device address identifies
   1864 *			a specific device on a specific PCI bus segment.
    1865 *			A PCI device address is a 32-bit unsigned integer
   1866 *			with the following format:
   1867 *
   1868 *				00000000.bbbbbbbb.dddddfff.00000000
   1869 *
   1870 *			Use the HV_PCI_DEVICE_BUILD() macro to construct
   1871 *			such values.
   1872 *
   1873 *	pci_config_offset
    1874 *			PCI configuration space offset.  For conventional
   1875 *			PCI a value between 0 and 255.  For extended
   1876 *			configuration space, a value between 0 and 4095.
   1877 *
   1878 *			Note: For PCI configuration space accesses, the offset
   1879 *			      must be aligned to the access size.
   1880 *
   1881 *	error_flag	A return value which specifies if the action succeeded
   1882 *			or failed.  0 means no error, non-0 means some error
   1883 *			occurred while performing the service.
   1884 *
   1885 *	io_sync_direction
   1886 *			Direction definition for pci_dma_sync(), defined
   1887 *			below in HV_PCI_SYNC_*.
   1888 *
   1889 *	io_page_list	A list of io_page_addresses, an io_page_address is
   1890 *			a real address.
   1891 *
   1892 *	io_page_list_p	A pointer to an io_page_list.
   1893 *
   1894 *	"size based byte swap" - Some functions do size based byte swapping
   1895 *				 which allows sw to access pointers and
   1896 *				 counters in native form when the processor
   1897 *				 operates in a different endianness than the
   1898 *				 IO bus.  Size-based byte swapping converts a
   1899 *				 multi-byte field between big-endian and
   1900 *				 little-endian format.
   1901 */
   1902
   1903#define HV_PCI_MAP_ATTR_READ		0x01
   1904#define HV_PCI_MAP_ATTR_WRITE		0x02
   1905#define HV_PCI_MAP_ATTR_RELAXED_ORDER	0x04
   1906
/* Pack a PCI bus number (8 bits), device number (5 bits) and function
 * number (3 bits) into a 32-bit pci_device address with the layout
 * 00000000.bbbbbbbb.dddddfff.00000000 described above.
 */
    1907#define HV_PCI_DEVICE_BUILD(b,d,f)	\
    1908	((((b) & 0xff) << 16) | \
    1909	 (((d) & 0x1f) << 11) | \
    1910	 (((f) & 0x07) <<  8))
   1911
/* Build a 64-bit tsbid: tsbnum in bits 63:32, tsbindex in bits 31:00
 * (see the tsbid terminology entry above).
 */
    1912#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
    1913	((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))
   1914
   1915#define HV_PCI_SYNC_FOR_DEVICE		0x01
   1916#define HV_PCI_SYNC_FOR_CPU		0x02
   1917
   1918/* pci_iommu_map()
   1919 * TRAP:	HV_FAST_TRAP
   1920 * FUNCTION:	HV_FAST_PCI_IOMMU_MAP
   1921 * ARG0:	devhandle
   1922 * ARG1:	tsbid
   1923 * ARG2:	#ttes
   1924 * ARG3:	io_attributes
   1925 * ARG4:	io_page_list_p
   1926 * RET0:	status
   1927 * RET1:	#ttes mapped
   1928 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex/io_attributes
   1929 *		EBADALIGN	Improperly aligned real address
   1930 *		ENORADDR	Invalid real address
   1931 *
   1932 * Create IOMMU mappings in the sun4v device defined by the given
   1933 * devhandle.  The mappings are created in the TSB defined by the
   1934 * tsbnum component of the given tsbid.  The first mapping is created
    1935 * in the TSB index defined by the tsbindex component of the given tsbid.
   1936 * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
   1937 * the second at tsbnum, tsbindex + 1, etc.
   1938 *
   1939 * All mappings are created with the attributes defined by the io_attributes
   1940 * argument.  The page mapping addresses are described in the io_page_list
   1941 * defined by the given io_page_list_p, which is a pointer to the io_page_list.
   1942 * The first entry in the io_page_list is the address for the first iotte, the
   1943 * 2nd for the 2nd iotte, and so on.
   1944 *
   1945 * Each io_page_address in the io_page_list must be appropriately aligned.
   1946 * #ttes must be greater than zero.  For this version of the spec, the tsbnum
   1947 * component of the given tsbid must be zero.
   1948 *
    1949 * Returns the actual number of mappings created, which may be less than
   1950 * or equal to the argument #ttes.  If the function returns a value which
    1951 * is less than the #ttes, the caller may continue to call the function with
   1952 * an updated tsbid, #ttes, io_page_list_p arguments until all pages are
   1953 * mapped.
   1954 *
   1955 * Note: This function does not imply an iotte cache flush.  The guest must
   1956 *       demap an entry before re-mapping it.
   1957 */
   1958#define HV_FAST_PCI_IOMMU_MAP		0xb0
   1959
   1960/* pci_iommu_demap()
   1961 * TRAP:	HV_FAST_TRAP
   1962 * FUNCTION:	HV_FAST_PCI_IOMMU_DEMAP
   1963 * ARG0:	devhandle
   1964 * ARG1:	tsbid
   1965 * ARG2:	#ttes
   1966 * RET0:	status
   1967 * RET1:	#ttes demapped
   1968 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex
   1969 *
   1970 * Demap and flush IOMMU mappings in the device defined by the given
   1971 * devhandle.  Demaps up to #ttes entries in the TSB defined by the tsbnum
   1972 * component of the given tsbid, starting at the TSB index defined by the
   1973 * tsbindex component of the given tsbid.
   1974 *
   1975 * For this version of the spec, the tsbnum of the given tsbid must be zero.
   1976 * #ttes must be greater than zero.
   1977 *
   1978 * Returns the actual number of ttes demapped, which may be less than or equal
   1979 * to the argument #ttes.  If #ttes demapped is less than #ttes, the caller
   1980 * may continue to call this function with updated tsbid and #ttes arguments
   1981 * until all pages are demapped.
   1982 *
   1983 * Note: Entries do not have to be mapped to be demapped.  A demap of an
   1984 *       unmapped page will flush the entry from the tte cache.
   1985 */
   1986#define HV_FAST_PCI_IOMMU_DEMAP		0xb1
   1987
   1988/* pci_iommu_getmap()
   1989 * TRAP:	HV_FAST_TRAP
   1990 * FUNCTION:	HV_FAST_PCI_IOMMU_GETMAP
   1991 * ARG0:	devhandle
   1992 * ARG1:	tsbid
   1993 * RET0:	status
   1994 * RET1:	io_attributes
   1995 * RET2:	real address
   1996 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex
   1997 *		ENOMAP		Mapping is not valid, no translation exists
   1998 *
   1999 * Read and return the mapping in the device described by the given devhandle
   2000 * and tsbid.  If successful, the io_attributes shall be returned in RET1
   2001 * and the page address of the mapping shall be returned in RET2.
   2002 *
   2003 * For this version of the spec, the tsbnum component of the given tsbid
   2004 * must be zero.
   2005 */
   2006#define HV_FAST_PCI_IOMMU_GETMAP	0xb2
   2007
   2008/* pci_iommu_getbypass()
   2009 * TRAP:	HV_FAST_TRAP
   2010 * FUNCTION:	HV_FAST_PCI_IOMMU_GETBYPASS
   2011 * ARG0:	devhandle
   2012 * ARG1:	real address
   2013 * ARG2:	io_attributes
   2014 * RET0:	status
   2015 * RET1:	io_addr
   2016 * ERRORS:	EINVAL		Invalid devhandle/io_attributes
   2017 *		ENORADDR	Invalid real address
   2018 *		ENOTSUPPORTED	Function not supported in this implementation.
   2019 *
   2020 * Create a "special" mapping in the device described by the given devhandle,
   2021 * for the given real address and attributes.  Return the IO address in RET1
   2022 * if successful.
   2023 */
   2024#define HV_FAST_PCI_IOMMU_GETBYPASS	0xb3
   2025
   2026/* pci_config_get()
   2027 * TRAP:	HV_FAST_TRAP
   2028 * FUNCTION:	HV_FAST_PCI_CONFIG_GET
   2029 * ARG0:	devhandle
   2030 * ARG1:	pci_device
   2031 * ARG2:	pci_config_offset
   2032 * ARG3:	size
   2033 * RET0:	status
   2034 * RET1:	error_flag
   2035 * RET2:	data
   2036 * ERRORS:	EINVAL		Invalid devhandle/pci_device/offset/size
   2037 *		EBADALIGN	pci_config_offset not size aligned
   2038 *		ENOACCESS	Access to this offset is not permitted
   2039 *
   2040 * Read PCI configuration space for the adapter described by the given
   2041 * devhandle.  Read size (1, 2, or 4) bytes of data from the given
   2042 * pci_device, at pci_config_offset from the beginning of the device's
   2043 * configuration space.  If there was no error, RET1 is set to zero and
   2044 * RET2 is set to the data read.  Insignificant bits in RET2 are not
   2045 * guaranteed to have any specific value and therefore must be ignored.
   2046 *
   2047 * The data returned in RET2 is size based byte swapped.
   2048 *
   2049 * If an error occurs during the read, set RET1 to a non-zero value.  The
   2050 * given pci_config_offset must be 'size' aligned.
   2051 */
   2052#define HV_FAST_PCI_CONFIG_GET		0xb4
   2053
   2054/* pci_config_put()
   2055 * TRAP:	HV_FAST_TRAP
   2056 * FUNCTION:	HV_FAST_PCI_CONFIG_PUT
   2057 * ARG0:	devhandle
   2058 * ARG1:	pci_device
   2059 * ARG2:	pci_config_offset
   2060 * ARG3:	size
   2061 * ARG4:	data
   2062 * RET0:	status
   2063 * RET1:	error_flag
   2064 * ERRORS:	EINVAL		Invalid devhandle/pci_device/offset/size
   2065 *		EBADALIGN	pci_config_offset not size aligned
   2066 *		ENOACCESS	Access to this offset is not permitted
   2067 *
   2068 * Write PCI configuration space for the adapter described by the given
   2069 * devhandle.  Write size (1, 2, or 4) bytes of data in a single operation,
   2070 * at pci_config_offset from the beginning of the device's configuration
   2071 * space.  The data argument contains the data to be written to configuration
   2072 * space.  Prior to writing, the data is size based byte swapped.
   2073 *
   2074 * If an error occurs during the write access, do not generate an error
   2075 * report, do set RET1 to a non-zero value.  Otherwise RET1 is zero.
   2076 * The given pci_config_offset must be 'size' aligned.
   2077 *
   2078 * This function is permitted to read from offset zero in the configuration
   2079 * space described by the given pci_device if necessary to ensure that the
   2080 * write access to config space completes.
   2081 */
   2082#define HV_FAST_PCI_CONFIG_PUT		0xb5
   2083
   2084/* pci_peek()
   2085 * TRAP:	HV_FAST_TRAP
   2086 * FUNCTION:	HV_FAST_PCI_PEEK
   2087 * ARG0:	devhandle
   2088 * ARG1:	real address
   2089 * ARG2:	size
   2090 * RET0:	status
   2091 * RET1:	error_flag
   2092 * RET2:	data
   2093 * ERRORS:	EINVAL		Invalid devhandle or size
   2094 *		EBADALIGN	Improperly aligned real address
   2095 *		ENORADDR	Bad real address
   2096 *		ENOACCESS	Guest access prohibited
   2097 *
   2098 * Attempt to read the IO address given by the given devhandle, real address,
   2099 * and size.  Size must be 1, 2, 4, or 8.  The read is performed as a single
   2100 * access operation using the given size.  If an error occurs when reading
   2101 * from the given location, do not generate an error report, but return a
   2102 * non-zero value in RET1.  If the read was successful, return zero in RET1
   2103 * and return the actual data read in RET2.  The data returned is size based
   2104 * byte swapped.
   2105 *
   2106 * Non-significant bits in RET2 are not guaranteed to have any specific value
   2107 * and therefore must be ignored.  If RET1 is returned as non-zero, the data
   2108 * value is not guaranteed to have any specific value and should be ignored.
   2109 *
   2110 * The caller must have permission to read from the given devhandle, real
   2111 * address, which must be an IO address.  The argument real address must be a
   2112 * size aligned address.
   2113 *
   2114 * The hypervisor implementation of this function must block access to any
   2115 * IO address that the guest does not have explicit permission to access.
   2116 */
   2117#define HV_FAST_PCI_PEEK		0xb6
   2118
   2119/* pci_poke()
   2120 * TRAP:	HV_FAST_TRAP
   2121 * FUNCTION:	HV_FAST_PCI_POKE
   2122 * ARG0:	devhandle
   2123 * ARG1:	real address
   2124 * ARG2:	size
   2125 * ARG3:	data
   2126 * ARG4:	pci_device
   2127 * RET0:	status
   2128 * RET1:	error_flag
   2129 * ERRORS:	EINVAL		Invalid devhandle, size, or pci_device
   2130 *		EBADALIGN	Improperly aligned real address
   2131 *		ENORADDR	Bad real address
   2132 *		ENOACCESS	Guest access prohibited
   2133 *		ENOTSUPPORTED	Function is not supported by implementation
   2134 *
   2135 * Attempt to write data to the IO address given by the given devhandle,
   2136 * real address, and size.  Size must be 1, 2, 4, or 8.  The write is
   2137 * performed as a single access operation using the given size. Prior to
   2138 * writing the data is size based swapped.
   2139 *
   2140 * If an error occurs when writing to the given location, do not generate an
   2141 * error report, but return a non-zero value in RET1.  If the write was
   2142 * successful, return zero in RET1.
   2143 *
   2144 * pci_device describes the configuration address of the device being
   2145 * written to.  The implementation may safely read from offset 0 with
   2146 * the configuration space of the device described by devhandle and
   2147 * pci_device in order to guarantee that the write portion of the operation
   2148 * completes
   2149 *
   2150 * Any error that occurs due to the read shall be reported using the normal
   2151 * error reporting mechanisms .. the read error is not suppressed.
   2152 *
   2153 * The caller must have permission to write to the given devhandle, real
   2154 * address, which must be an IO address.  The argument real address must be a
   2155 * size aligned address.  The caller must have permission to read from
    2156 * the given devhandle, pci_device configuration space offset 0.
   2157 *
   2158 * The hypervisor implementation of this function must block access to any
   2159 * IO address that the guest does not have explicit permission to access.
   2160 */
   2161#define HV_FAST_PCI_POKE		0xb7
   2162
   2163/* pci_dma_sync()
   2164 * TRAP:	HV_FAST_TRAP
   2165 * FUNCTION:	HV_FAST_PCI_DMA_SYNC
   2166 * ARG0:	devhandle
   2167 * ARG1:	real address
   2168 * ARG2:	size
   2169 * ARG3:	io_sync_direction
   2170 * RET0:	status
   2171 * RET1:	#synced
   2172 * ERRORS:	EINVAL		Invalid devhandle or io_sync_direction
   2173 *		ENORADDR	Bad real address
   2174 *
   2175 * Synchronize a memory region described by the given real address and size,
   2176 * for the device defined by the given devhandle using the direction(s)
   2177 * defined by the given io_sync_direction.  The argument size is the size of
   2178 * the memory region in bytes.
   2179 *
   2180 * Return the actual number of bytes synchronized in the return value #synced,
   2181 * which may be less than or equal to the argument size.  If the return
   2182 * value #synced is less than size, the caller must continue to call this
   2183 * function with updated real address and size arguments until the entire
   2184 * memory region is synchronized.
   2185 */
   2186#define HV_FAST_PCI_DMA_SYNC		0xb8
   2187
   2188/* PCI MSI services.  */
   2189
   2190#define HV_MSITYPE_MSI32		0x00
   2191#define HV_MSITYPE_MSI64		0x01
   2192
   2193#define HV_MSIQSTATE_IDLE		0x00
   2194#define HV_MSIQSTATE_ERROR		0x01
   2195
   2196#define HV_MSIQ_INVALID			0x00
   2197#define HV_MSIQ_VALID			0x01
   2198
   2199#define HV_MSISTATE_IDLE		0x00
   2200#define HV_MSISTATE_DELIVERED		0x01
   2201
   2202#define HV_MSIVALID_INVALID		0x00
   2203#define HV_MSIVALID_VALID		0x01
   2204
   2205#define HV_PCIE_MSGTYPE_PME_MSG		0x18
   2206#define HV_PCIE_MSGTYPE_PME_ACK_MSG	0x1b
   2207#define HV_PCIE_MSGTYPE_CORR_MSG	0x30
   2208#define HV_PCIE_MSGTYPE_NONFATAL_MSG	0x31
   2209#define HV_PCIE_MSGTYPE_FATAL_MSG	0x33
   2210
   2211#define HV_MSG_INVALID			0x00
   2212#define HV_MSG_VALID			0x01
   2213
   2214/* pci_msiq_conf()
   2215 * TRAP:	HV_FAST_TRAP
   2216 * FUNCTION:	HV_FAST_PCI_MSIQ_CONF
   2217 * ARG0:	devhandle
   2218 * ARG1:	msiqid
   2219 * ARG2:	real address
   2220 * ARG3:	number of entries
   2221 * RET0:	status
   2222 * ERRORS:	EINVAL		Invalid devhandle, msiqid or nentries
   2223 *		EBADALIGN	Improperly aligned real address
   2224 *		ENORADDR	Bad real address
   2225 *
   2226 * Configure the MSI queue given by the devhandle and msiqid arguments,
   2227 * and to be placed at the given real address and be of the given
   2228 * number of entries.  The real address must be aligned exactly to match
   2229 * the queue size.  Each queue entry is 64-bytes long, so f.e. a 32 entry
   2230 * queue must be aligned on a 2048 byte real address boundary.  The MSI-EQ
   2231 * Head and Tail are initialized so that the MSI-EQ is 'empty'.
   2232 *
   2233 * Implementation Note: Certain implementations have fixed sized queues.  In
   2234 *                      that case, number of entries must contain the correct
   2235 *                      value.
   2236 */
   2237#define HV_FAST_PCI_MSIQ_CONF		0xc0
   2238
   2239/* pci_msiq_info()
   2240 * TRAP:	HV_FAST_TRAP
   2241 * FUNCTION:	HV_FAST_PCI_MSIQ_INFO
   2242 * ARG0:	devhandle
   2243 * ARG1:	msiqid
   2244 * RET0:	status
   2245 * RET1:	real address
   2246 * RET2:	number of entries
   2247 * ERRORS:	EINVAL		Invalid devhandle or msiqid
   2248 *
   2249 * Return the configuration information for the MSI queue described
   2250 * by the given devhandle and msiqid.  The base address of the queue
   2251 * is returned in ARG1 and the number of entries is returned in ARG2.
   2252 * If the queue is unconfigured, the real address is undefined and the
   2253 * number of entries will be returned as zero.
   2254 */
   2255#define HV_FAST_PCI_MSIQ_INFO		0xc1
   2256
   2257/* pci_msiq_getvalid()
   2258 * TRAP:	HV_FAST_TRAP
   2259 * FUNCTION:	HV_FAST_PCI_MSIQ_GETVALID
   2260 * ARG0:	devhandle
   2261 * ARG1:	msiqid
   2262 * RET0:	status
   2263 * RET1:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
   2264 * ERRORS:	EINVAL		Invalid devhandle or msiqid
   2265 *
   2266 * Get the valid state of the MSI-EQ described by the given devhandle and
   2267 * msiqid.
   2268 */
   2269#define HV_FAST_PCI_MSIQ_GETVALID	0xc2
   2270
   2271/* pci_msiq_setvalid()
   2272 * TRAP:	HV_FAST_TRAP
   2273 * FUNCTION:	HV_FAST_PCI_MSIQ_SETVALID
   2274 * ARG0:	devhandle
   2275 * ARG1:	msiqid
   2276 * ARG2:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
   2277 * RET0:	status
   2278 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqvalid
   2279 *				value or MSI EQ is uninitialized
   2280 *
   2281 * Set the valid state of the MSI-EQ described by the given devhandle and
   2282 * msiqid to the given msiqvalid.
   2283 */
   2284#define HV_FAST_PCI_MSIQ_SETVALID	0xc3
   2285
   2286/* pci_msiq_getstate()
   2287 * TRAP:	HV_FAST_TRAP
   2288 * FUNCTION:	HV_FAST_PCI_MSIQ_GETSTATE
   2289 * ARG0:	devhandle
   2290 * ARG1:	msiqid
   2291 * RET0:	status
   2292 * RET1:	msiqstate	(HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
   2293 * ERRORS:	EINVAL		Invalid devhandle or msiqid
   2294 *
   2295 * Get the state of the MSI-EQ described by the given devhandle and
   2296 * msiqid.
   2297 */
   2298#define HV_FAST_PCI_MSIQ_GETSTATE	0xc4
   2299
    2300/* pci_msiq_setstate()
    2301 * TRAP:	HV_FAST_TRAP
    2302 * FUNCTION:	HV_FAST_PCI_MSIQ_SETSTATE
   2303 * ARG0:	devhandle
   2304 * ARG1:	msiqid
   2305 * ARG2:	msiqstate	(HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
   2306 * RET0:	status
   2307 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqstate
   2308 *				value or MSI EQ is uninitialized
   2309 *
   2310 * Set the state of the MSI-EQ described by the given devhandle and
    2311 * msiqid to the given msiqstate.
   2312 */
   2313#define HV_FAST_PCI_MSIQ_SETSTATE	0xc5
   2314
   2315/* pci_msiq_gethead()
   2316 * TRAP:	HV_FAST_TRAP
   2317 * FUNCTION:	HV_FAST_PCI_MSIQ_GETHEAD
   2318 * ARG0:	devhandle
   2319 * ARG1:	msiqid
   2320 * RET0:	status
   2321 * RET1:	msiqhead
   2322 * ERRORS:	EINVAL		Invalid devhandle or msiqid
   2323 *
   2324 * Get the current MSI EQ queue head for the MSI-EQ described by the
   2325 * given devhandle and msiqid.
   2326 */
   2327#define HV_FAST_PCI_MSIQ_GETHEAD	0xc6
   2328
   2329/* pci_msiq_sethead()
   2330 * TRAP:	HV_FAST_TRAP
   2331 * FUNCTION:	HV_FAST_PCI_MSIQ_SETHEAD
   2332 * ARG0:	devhandle
   2333 * ARG1:	msiqid
   2334 * ARG2:	msiqhead
   2335 * RET0:	status
   2336 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqhead,
   2337 *				or MSI EQ is uninitialized
   2338 *
   2339 * Set the current MSI EQ queue head for the MSI-EQ described by the
   2340 * given devhandle and msiqid.
   2341 */
   2342#define HV_FAST_PCI_MSIQ_SETHEAD	0xc7
   2343
   2344/* pci_msiq_gettail()
   2345 * TRAP:	HV_FAST_TRAP
   2346 * FUNCTION:	HV_FAST_PCI_MSIQ_GETTAIL
   2347 * ARG0:	devhandle
   2348 * ARG1:	msiqid
   2349 * RET0:	status
   2350 * RET1:	msiqtail
   2351 * ERRORS:	EINVAL		Invalid devhandle or msiqid
   2352 *
   2353 * Get the current MSI EQ queue tail for the MSI-EQ described by the
   2354 * given devhandle and msiqid.
   2355 */
   2356#define HV_FAST_PCI_MSIQ_GETTAIL	0xc8
   2357
   2358/* pci_msi_getvalid()
   2359 * TRAP:	HV_FAST_TRAP
   2360 * FUNCTION:	HV_FAST_PCI_MSI_GETVALID
   2361 * ARG0:	devhandle
   2362 * ARG1:	msinum
   2363 * RET0:	status
   2364 * RET1:	msivalidstate
   2365 * ERRORS:	EINVAL		Invalid devhandle or msinum
   2366 *
   2367 * Get the current valid/enabled state for the MSI defined by the
   2368 * given devhandle and msinum.
   2369 */
   2370#define HV_FAST_PCI_MSI_GETVALID	0xc9
   2371
   2372/* pci_msi_setvalid()
   2373 * TRAP:	HV_FAST_TRAP
   2374 * FUNCTION:	HV_FAST_PCI_MSI_SETVALID
   2375 * ARG0:	devhandle
   2376 * ARG1:	msinum
   2377 * ARG2:	msivalidstate
   2378 * RET0:	status
   2379 * ERRORS:	EINVAL		Invalid devhandle or msinum or msivalidstate
   2380 *
   2381 * Set the current valid/enabled state for the MSI defined by the
   2382 * given devhandle and msinum.
   2383 */
   2384#define HV_FAST_PCI_MSI_SETVALID	0xca
   2385
   2386/* pci_msi_getmsiq()
   2387 * TRAP:	HV_FAST_TRAP
   2388 * FUNCTION:	HV_FAST_PCI_MSI_GETMSIQ
   2389 * ARG0:	devhandle
   2390 * ARG1:	msinum
   2391 * RET0:	status
   2392 * RET1:	msiqid
   2393 * ERRORS:	EINVAL		Invalid devhandle or msinum or MSI is unbound
   2394 *
   2395 * Get the MSI EQ that the MSI defined by the given devhandle and
   2396 * msinum is bound to.
   2397 */
   2398#define HV_FAST_PCI_MSI_GETMSIQ		0xcb
   2399
   2400/* pci_msi_setmsiq()
   2401 * TRAP:	HV_FAST_TRAP
   2402 * FUNCTION:	HV_FAST_PCI_MSI_SETMSIQ
   2403 * ARG0:	devhandle
   2404 * ARG1:	msinum
   2405 * ARG2:	msitype
   2406 * ARG3:	msiqid
   2407 * RET0:	status
   2408 * ERRORS:	EINVAL		Invalid devhandle or msinum or msiqid
   2409 *
   2410 * Set the MSI EQ that the MSI defined by the given devhandle and
   2411 * msinum is bound to.
   2412 */
   2413#define HV_FAST_PCI_MSI_SETMSIQ		0xcc
   2414
   2415/* pci_msi_getstate()
   2416 * TRAP:	HV_FAST_TRAP
   2417 * FUNCTION:	HV_FAST_PCI_MSI_GETSTATE
   2418 * ARG0:	devhandle
   2419 * ARG1:	msinum
   2420 * RET0:	status
   2421 * RET1:	msistate
   2422 * ERRORS:	EINVAL		Invalid devhandle or msinum
   2423 *
   2424 * Get the state of the MSI defined by the given devhandle and msinum.
   2425 * If not initialized, return HV_MSISTATE_IDLE.
   2426 */
   2427#define HV_FAST_PCI_MSI_GETSTATE	0xcd
   2428
   2429/* pci_msi_setstate()
   2430 * TRAP:	HV_FAST_TRAP
   2431 * FUNCTION:	HV_FAST_PCI_MSI_SETSTATE
   2432 * ARG0:	devhandle
   2433 * ARG1:	msinum
   2434 * ARG2:	msistate
   2435 * RET0:	status
   2436 * ERRORS:	EINVAL		Invalid devhandle or msinum or msistate
   2437 *
   2438 * Set the state of the MSI defined by the given devhandle and msinum.
   2439 */
   2440#define HV_FAST_PCI_MSI_SETSTATE	0xce
   2441
   2442/* pci_msg_getmsiq()
   2443 * TRAP:	HV_FAST_TRAP
   2444 * FUNCTION:	HV_FAST_PCI_MSG_GETMSIQ
   2445 * ARG0:	devhandle
   2446 * ARG1:	msgtype
   2447 * RET0:	status
   2448 * RET1:	msiqid
   2449 * ERRORS:	EINVAL		Invalid devhandle or msgtype
   2450 *
   2451 * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
   2452 */
   2453#define HV_FAST_PCI_MSG_GETMSIQ		0xd0
   2454
   2455/* pci_msg_setmsiq()
   2456 * TRAP:	HV_FAST_TRAP
   2457 * FUNCTION:	HV_FAST_PCI_MSG_SETMSIQ
   2458 * ARG0:	devhandle
   2459 * ARG1:	msgtype
   2460 * ARG2:	msiqid
   2461 * RET0:	status
   2462 * ERRORS:	EINVAL		Invalid devhandle, msgtype, or msiqid
   2463 *
   2464 * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
   2465 */
   2466#define HV_FAST_PCI_MSG_SETMSIQ		0xd1
   2467
   2468/* pci_msg_getvalid()
   2469 * TRAP:	HV_FAST_TRAP
   2470 * FUNCTION:	HV_FAST_PCI_MSG_GETVALID
   2471 * ARG0:	devhandle
   2472 * ARG1:	msgtype
   2473 * RET0:	status
   2474 * RET1:	msgvalidstate
   2475 * ERRORS:	EINVAL		Invalid devhandle or msgtype
   2476 *
   2477 * Get the valid/enabled state of the MSG defined by the given
   2478 * devhandle and msgtype.
   2479 */
   2480#define HV_FAST_PCI_MSG_GETVALID	0xd2
   2481
   2482/* pci_msg_setvalid()
   2483 * TRAP:	HV_FAST_TRAP
   2484 * FUNCTION:	HV_FAST_PCI_MSG_SETVALID
   2485 * ARG0:	devhandle
   2486 * ARG1:	msgtype
   2487 * ARG2:	msgvalidstate
   2488 * RET0:	status
   2489 * ERRORS:	EINVAL		Invalid devhandle or msgtype or msgvalidstate
   2490 *
   2491 * Set the valid/enabled state of the MSG defined by the given
   2492 * devhandle and msgtype.
   2493 */
   2494#define HV_FAST_PCI_MSG_SETVALID	0xd3
   2495
   2496/* PCI IOMMU v2 definitions and services
   2497 *
   2498 * While the PCI IO definitions above is valid IOMMU v2 adds new PCI IO
   2499 * definitions and services.
   2500 *
   2501 *	CTE		Clump Table Entry. First level table entry in the ATU.
   2502 *
   2503 *	pci_device_list
   2504 *			A 32-bit aligned list of pci_devices.
   2505 *
   2506 *	pci_device_listp
   2507 *			real address of a pci_device_list. 32-bit aligned.
   2508 *
   2509 *	iotte		IOMMU translation table entry.
   2510 *
   2511 *	iotte_attributes
   2512 *			IO Attributes for IOMMU v2 mappings. In addition to
   2513 *			read, write IOMMU v2 supports relax ordering
   2514 *
   2515 *	io_page_list	A 64-bit aligned list of real addresses. Each real
   2516 *			address in an io_page_list must be properly aligned
   2517 *			to the pagesize of the given IOTSB.
   2518 *
   2519 *	io_page_list_p	Real address of an io_page_list, 64-bit aligned.
   2520 *
   2521 *	IOTSB		IO Translation Storage Buffer. An aligned table of
   2522 *			IOTTEs. Each IOTSB has a pagesize, table size, and
   2523 *			virtual address associated with it that must match
 *			a pagesize and table size supported by the underlying
   2525 *			hardware implementation. The alignment requirements
   2526 *			for an IOTSB depend on the pagesize used for that IOTSB.
   2527 *			Each IOTTE in an IOTSB maps one pagesize-sized page.
   2528 *			The size of the IOTSB dictates how large of a virtual
   2529 *			address space the IOTSB is capable of mapping.
   2530 *
   2531 *	iotsb_handle	An opaque identifier for an IOTSB. A devhandle plus
   2532 *			iotsb_handle represents a binding of an IOTSB to a
   2533 *			PCI root complex.
   2534 *
   2535 *	iotsb_index	Zero-based IOTTE number within an IOTSB.
   2536 */
   2537
/* The index_count argument consists of two fields:
 * bits 63:48 #iottes and bits 47:0 iotsb_index
 *
 * NOTE(review): __iotsb_index is not masked to 48 bits here, so a value
 * with bits above 47 set would corrupt the #iottes field -- callers are
 * expected to pass an in-range index.
 */
#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
	(((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
   2543
   2544/* pci_iotsb_conf()
   2545 * TRAP:	HV_FAST_TRAP
   2546 * FUNCTION:	HV_FAST_PCI_IOTSB_CONF
   2547 * ARG0:	devhandle
   2548 * ARG1:	r_addr
   2549 * ARG2:	size
   2550 * ARG3:	pagesize
   2551 * ARG4:	iova
   2552 * RET0:	status
   2553 * RET1:	iotsb_handle
   2554 * ERRORS:	EINVAL		Invalid devhandle, size, iova, or pagesize
   2555 *		EBADALIGN	r_addr is not properly aligned
   2556 *		ENORADDR	r_addr is not a valid real address
   2557 *		ETOOMANY	No further IOTSBs may be configured
 *		EBUSY		Duplicate devhandle, r_addr, iova combination
   2559 *
   2560 * Create an IOTSB suitable for the PCI root complex identified by devhandle,
   2561 * for the DMA virtual address defined by the argument iova.
   2562 *
   2563 * r_addr is the properly aligned base address of the IOTSB and size is the
 * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
   2565 * being configured. If it contains any values other than zeros then the
   2566 * behavior is undefined.
   2567 *
   2568 * pagesize is the size of each page in the IOTSB. Note that the combination of
   2569 * size (table size) and pagesize must be valid.
   2570 *
   2571 * virt is the DMA virtual address this IOTSB will map.
   2572 *
   2573 * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
   2574 * Once configured, privileged access to the IOTSB memory is prohibited and
   2575 * creates undefined behavior. The only permitted access is indirect via these
   2576 * services.
   2577 */
   2578#define HV_FAST_PCI_IOTSB_CONF		0x190
   2579
   2580/* pci_iotsb_info()
   2581 * TRAP:	HV_FAST_TRAP
   2582 * FUNCTION:	HV_FAST_PCI_IOTSB_INFO
   2583 * ARG0:	devhandle
   2584 * ARG1:	iotsb_handle
   2585 * RET0:	status
   2586 * RET1:	r_addr
   2587 * RET2:	size
   2588 * RET3:	pagesize
   2589 * RET4:	iova
   2590 * RET5:	#bound
   2591 * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
   2592 *
   2593 * This service returns configuration information about an IOTSB previously
   2594 * created with pci_iotsb_conf.
   2595 *
   2596 * iotsb_handle value 0 may be used with this service to inquire about the
   2597 * legacy IOTSB that may or may not exist. If the service succeeds, the return
   2598 * values describe the legacy IOTSB and I/O virtual addresses mapped by that
   2599 * table. However, the table base address r_addr may contain the value -1 which
   2600 * indicates a memory range that cannot be accessed or be reclaimed.
   2601 *
   2602 * The return value #bound contains the number of PCI devices that iotsb_handle
   2603 * is currently bound to.
   2604 */
   2605#define HV_FAST_PCI_IOTSB_INFO		0x191
   2606
   2607/* pci_iotsb_unconf()
   2608 * TRAP:	HV_FAST_TRAP
   2609 * FUNCTION:	HV_FAST_PCI_IOTSB_UNCONF
   2610 * ARG0:	devhandle
   2611 * ARG1:	iotsb_handle
   2612 * RET0:	status
   2613 * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
   2614 *		EBUSY	The IOTSB is bound and may not be unconfigured
   2615 *
   2616 * This service unconfigures the IOTSB identified by the devhandle and
   2617 * iotsb_handle arguments, previously created with pci_iotsb_conf.
   2618 * The IOTSB must not be currently bound to any device or the service will fail
   2619 *
   2620 * If the call succeeds, iotsb_handle is no longer valid.
   2621 */
   2622#define HV_FAST_PCI_IOTSB_UNCONF	0x192
   2623
   2624/* pci_iotsb_bind()
   2625 * TRAP:	HV_FAST_TRAP
   2626 * FUNCTION:	HV_FAST_PCI_IOTSB_BIND
   2627 * ARG0:	devhandle
   2628 * ARG1:	iotsb_handle
   2629 * ARG2:	pci_device
   2630 * RET0:	status
   2631 * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
   2632 *		EBUSY	A PCI function is already bound to an IOTSB at the same
   2633 *			address range as specified by devhandle, iotsb_handle.
   2634 *
   2635 * This service binds the PCI function specified by the argument pci_device to
   2636 * the IOTSB specified by the arguments devhandle and iotsb_handle.
   2637 *
   2638 * The PCI device function is bound to the specified IOTSB with the IOVA range
   2639 * specified when the IOTSB was configured via pci_iotsb_conf. If the function
   2640 * is already bound then it is unbound first.
   2641 */
   2642#define HV_FAST_PCI_IOTSB_BIND		0x193
   2643
   2644/* pci_iotsb_unbind()
   2645 * TRAP:	HV_FAST_TRAP
   2646 * FUNCTION:	HV_FAST_PCI_IOTSB_UNBIND
   2647 * ARG0:	devhandle
   2648 * ARG1:	iotsb_handle
   2649 * ARG2:	pci_device
   2650 * RET0:	status
   2651 * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
   2652 *		ENOMAP	The PCI function was not bound to the specified IOTSB
   2653 *
   2654 * This service unbinds the PCI device specified by the argument pci_device
 * from the IOTSB identified by the arguments devhandle and iotsb_handle.
   2656 *
   2657 * If the PCI device is not bound to the specified IOTSB then this service will
   2658 * fail with status ENOMAP
   2659 */
   2660#define HV_FAST_PCI_IOTSB_UNBIND	0x194
   2661
   2662/* pci_iotsb_get_binding()
   2663 * TRAP:	HV_FAST_TRAP
   2664 * FUNCTION:	HV_FAST_PCI_IOTSB_GET_BINDING
   2665 * ARG0:	devhandle
   2666 * ARG1:	iotsb_handle
   2667 * ARG2:	iova
   2668 * RET0:	status
   2669 * RET1:	iotsb_handle
   2670 * ERRORS:	EINVAL	Invalid devhandle, pci_device, or iova
   2671 *		ENOMAP	The PCI function is not bound to an IOTSB at iova
   2672 *
   2673 * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
   2674 * and DMA virtual address, iova.
   2675 *
   2676 * iova must be the base address of a DMA virtual address range as defined by
   2677 * the iommu-address-ranges property in the root complex device node defined
   2678 * by the argument devhandle.
   2679 */
   2680#define HV_FAST_PCI_IOTSB_GET_BINDING	0x195
   2681
   2682/* pci_iotsb_map()
   2683 * TRAP:	HV_FAST_TRAP
   2684 * FUNCTION:	HV_FAST_PCI_IOTSB_MAP
   2685 * ARG0:	devhandle
   2686 * ARG1:	iotsb_handle
   2687 * ARG2:	index_count
   2688 * ARG3:	iotte_attributes
   2689 * ARG4:	io_page_list_p
   2690 * RET0:	status
   2691 * RET1:	#mapped
   2692 * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, #iottes,
   2693 *				iotsb_index or iotte_attributes
   2694 *		EBADALIGN	Improperly aligned io_page_list_p or I/O page
   2695 *				address in the I/O page list.
   2696 *		ENORADDR	Invalid io_page_list_p or I/O page address in
   2697 *				the I/O page list.
   2698 *
   2699 * This service creates and flushes mappings in the IOTSB defined by the
   2700 * arguments devhandle, iotsb.
   2701 *
   2702 * The index_count argument consists of two fields. Bits 63:48 contain #iotte
   2703 * and bits 47:0 contain iotsb_index
   2704 *
   2705 * The first mapping is created in the IOTSB index specified by iotsb_index.
   2706 * Subsequent mappings are  created at iotsb_index+1 and so on.
   2707 *
   2708 * The attributes of each mapping are defined by the argument iotte_attributes.
   2709 *
   2710 * The io_page_list_p specifies the real address of the 64-bit-aligned list of
   2711 * #iottes I/O page addresses. Each page address must be a properly aligned
   2712 * real address of a page to be mapped in the IOTSB. The first entry in the I/O
   2713 * page list contains the real address of the first page, the 2nd entry for the
   2714 * 2nd page, and so on.
   2715 *
   2716 * #iottes must be greater than zero.
   2717 *
   2718 * The return value #mapped is the actual number of mappings created, which may
   2719 * be less than or equal to the argument #iottes. If the function returns
   2720 * successfully with a #mapped value less than the requested #iottes then the
   2721 * caller should continue to invoke the service with updated iotsb_index,
   2722 * #iottes, and io_page_list_p arguments until all pages are mapped.
   2723 *
   2724 * This service must not be used to demap a mapping. In other words, all
   2725 * mappings must be valid and have  one or both of the RW attribute bits set.
   2726 *
   2727 * Note:
   2728 * It is implementation-defined whether I/O page real address validity checking
   2729 * is done at time mappings are established or deferred until they are
   2730 * accessed.
   2731 */
   2732#define HV_FAST_PCI_IOTSB_MAP		0x196
   2733
   2734/* pci_iotsb_map_one()
   2735 * TRAP:	HV_FAST_TRAP
   2736 * FUNCTION:	HV_FAST_PCI_IOTSB_MAP_ONE
   2737 * ARG0:	devhandle
   2738 * ARG1:	iotsb_handle
   2739 * ARG2:	iotsb_index
   2740 * ARG3:	iotte_attributes
   2741 * ARG4:	r_addr
   2742 * RET0:	status
   2743 * ERRORS:	EINVAL		Invalid devhandle,iotsb_handle, iotsb_index
   2744 *				or iotte_attributes
   2745 *		EBADALIGN	Improperly aligned r_addr
   2746 *		ENORADDR	Invalid r_addr
   2747 *
   2748 * This service creates and flushes a single mapping in the IOTSB defined by the
   2749 * arguments devhandle, iotsb.
   2750 *
   2751 * The mapping for the page at r_addr is created at the IOTSB index specified by
   2752 * iotsb_index with  the attributes iotte_attributes.
   2753 *
   2754 * This service must not be used to demap a mapping. In other words, the mapping
   2755 * must be valid and have one or both of the RW attribute bits set.
   2756 *
   2757 * Note:
   2758 * It is implementation-defined whether I/O page real address validity checking
   2759 * is done at time mappings are established or deferred until they are
   2760 * accessed.
   2761 */
   2762#define HV_FAST_PCI_IOTSB_MAP_ONE	0x197
   2763
   2764/* pci_iotsb_demap()
   2765 * TRAP:	HV_FAST_TRAP
   2766 * FUNCTION:	HV_FAST_PCI_IOTSB_DEMAP
   2767 * ARG0:	devhandle
   2768 * ARG1:	iotsb_handle
   2769 * ARG2:	iotsb_index
   2770 * ARG3:	#iottes
   2771 * RET0:	status
   2772 * RET1:	#unmapped
   2773 * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index or #iottes
   2774 *
   2775 * This service unmaps and flushes up to #iottes mappings starting at index
   2776 * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb.
   2777 *
   2778 * #iottes must be greater than zero.
   2779 *
   2780 * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
   2781 * than or equal to the requested number of IOTTEs, #iottes.
   2782 *
   2783 * If #unmapped is less than #iottes, the caller should continue to invoke this
   2784 * service with updated iotsb_index and #iottes arguments until all pages are
   2785 * demapped.
   2786 */
   2787#define HV_FAST_PCI_IOTSB_DEMAP		0x198
   2788
   2789/* pci_iotsb_getmap()
   2790 * TRAP:	HV_FAST_TRAP
   2791 * FUNCTION:	HV_FAST_PCI_IOTSB_GETMAP
   2792 * ARG0:	devhandle
   2793 * ARG1:	iotsb_handle
   2794 * ARG2:	iotsb_index
   2795 * RET0:	status
   2796 * RET1:	r_addr
   2797 * RET2:	iotte_attributes
   2798 * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or iotsb_index
   2799 *		ENOMAP	No mapping was found
   2800 *
   2801 * This service returns the mapping specified by index iotsb_index from the
   2802 * IOTSB defined by the arguments devhandle, iotsb.
   2803 *
   2804 * Upon success, the real address of the mapping shall be returned in
 * r_addr and the IOTTE mapping attributes shall be returned in
   2806 * iotte_attributes.
   2807 *
   2808 * The return value iotte_attributes may not include optional features used in
   2809 * the call to create the  mapping.
   2810 */
   2811#define HV_FAST_PCI_IOTSB_GETMAP	0x199
   2812
   2813/* pci_iotsb_sync_mappings()
   2814 * TRAP:	HV_FAST_TRAP
   2815 * FUNCTION:	HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
   2816 * ARG0:	devhandle
   2817 * ARG1:	iotsb_handle
   2818 * ARG2:	iotsb_index
   2819 * ARG3:	#iottes
   2820 * RET0:	status
   2821 * RET1:	#synced
 * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
   2823 *
   2824 * This service synchronizes #iottes mappings starting at index iotsb_index in
   2825 * the IOTSB defined by the arguments devhandle, iotsb.
   2826 *
   2827 * #iottes must be greater than zero.
   2828 *
   2829 * The actual number of IOTTEs synchronized is returned in #synced, which may
   2830 * be less than or equal to the requested number, #iottes.
   2831 *
 * If, upon a successful return, #synced is less than #iottes, the caller
 * should continue to invoke this service with updated iotsb_index and
 * #iottes arguments until all pages are synchronized.
   2835 */
   2836#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS	0x19a
   2837
   2838/* Logical Domain Channel services.  */
   2839
   2840#define LDC_CHANNEL_DOWN		0
   2841#define LDC_CHANNEL_UP			1
   2842#define LDC_CHANNEL_RESETTING		2
   2843
   2844/* ldc_tx_qconf()
   2845 * TRAP:	HV_FAST_TRAP
   2846 * FUNCTION:	HV_FAST_LDC_TX_QCONF
   2847 * ARG0:	channel ID
   2848 * ARG1:	real address base of queue
   2849 * ARG2:	num entries in queue
   2850 * RET0:	status
   2851 *
   2852 * Configure transmit queue for the LDC endpoint specified by the
   2853 * given channel ID, to be placed at the given real address, and
   2854 * be of the given num entries.  Num entries must be a power of two.
   2855 * The real address base of the queue must be aligned on the queue
   2856 * size.  Each queue entry is 64-bytes, so for example, a 32 entry
   2857 * queue must be aligned on a 2048 byte real address boundary.
   2858 *
   2859 * Upon configuration of a valid transmit queue the head and tail
   2860 * pointers are set to a hypervisor specific identical value indicating
   2861 * that the queue initially is empty.
   2862 *
   2863 * The endpoint's transmit queue is un-configured if num entries is zero.
   2864 *
   2865 * The maximum number of entries for each queue for a specific cpu may be
   2866 * determined from the machine description.  A transmit queue may be
   2867 * specified even in the event that the LDC is down (peer endpoint has no
   2868 * receive queue specified).  Transmission will begin as soon as the peer
   2869 * endpoint defines a receive queue.
   2870 *
   2871 * It is recommended that a guest wait for a transmit queue to empty prior
   2872 * to reconfiguring it, or un-configuring it.  Re or un-configuring of a
   2873 * non-empty transmit queue behaves exactly as defined above, however it
   2874 * is undefined as to how many of the pending entries in the original queue
   2875 * will be delivered prior to the re-configuration taking effect.
   2876 * Furthermore, as the queue configuration causes a reset of the head and
   2877 * tail pointers there is no way for a guest to determine how many entries
   2878 * have been sent after the configuration operation.
   2879 */
   2880#define HV_FAST_LDC_TX_QCONF		0xe0
   2881
   2882/* ldc_tx_qinfo()
   2883 * TRAP:	HV_FAST_TRAP
   2884 * FUNCTION:	HV_FAST_LDC_TX_QINFO
   2885 * ARG0:	channel ID
   2886 * RET0:	status
   2887 * RET1:	real address base of queue
   2888 * RET2:	num entries in queue
   2889 *
   2890 * Return the configuration info for the transmit queue of LDC endpoint
   2891 * defined by the given channel ID.  The real address is the currently
   2892 * defined real address base of the defined queue, and num entries is the
   2893 * size of the queue in terms of number of entries.
   2894 *
   2895 * If the specified channel ID is a valid endpoint number, but no transmit
   2896 * queue has been defined this service will return success, but with num
   2897 * entries set to zero and the real address will have an undefined value.
   2898 */
   2899#define HV_FAST_LDC_TX_QINFO		0xe1
   2900
   2901/* ldc_tx_get_state()
   2902 * TRAP:	HV_FAST_TRAP
   2903 * FUNCTION:	HV_FAST_LDC_TX_GET_STATE
   2904 * ARG0:	channel ID
   2905 * RET0:	status
   2906 * RET1:	head offset
   2907 * RET2:	tail offset
   2908 * RET3:	channel state
   2909 *
   2910 * Return the transmit state, and the head and tail queue pointers, for
   2911 * the transmit queue of the LDC endpoint defined by the given channel ID.
   2912 * The head and tail values are the byte offset of the head and tail
   2913 * positions of the transmit queue for the specified endpoint.
   2914 */
   2915#define HV_FAST_LDC_TX_GET_STATE	0xe2
   2916
   2917/* ldc_tx_set_qtail()
   2918 * TRAP:	HV_FAST_TRAP
   2919 * FUNCTION:	HV_FAST_LDC_TX_SET_QTAIL
   2920 * ARG0:	channel ID
   2921 * ARG1:	tail offset
   2922 * RET0:	status
   2923 *
   2924 * Update the tail pointer for the transmit queue associated with the LDC
   2925 * endpoint defined by the given channel ID.  The tail offset specified
   2926 * must be aligned on a 64 byte boundary, and calculated so as to increase
   2927 * the number of pending entries on the transmit queue.  Any attempt to
 * decrease the number of pending transmit queue entries is considered
   2929 * an invalid tail offset and will result in an EINVAL error.
   2930 *
   2931 * Since the tail of the transmit queue may not be moved backwards, the
   2932 * transmit queue may be flushed by configuring a new transmit queue,
   2933 * whereupon the hypervisor will configure the initial transmit head and
   2934 * tail pointers to be equal.
   2935 */
   2936#define HV_FAST_LDC_TX_SET_QTAIL	0xe3
   2937
   2938/* ldc_rx_qconf()
   2939 * TRAP:	HV_FAST_TRAP
   2940 * FUNCTION:	HV_FAST_LDC_RX_QCONF
   2941 * ARG0:	channel ID
   2942 * ARG1:	real address base of queue
   2943 * ARG2:	num entries in queue
   2944 * RET0:	status
   2945 *
   2946 * Configure receive queue for the LDC endpoint specified by the
   2947 * given channel ID, to be placed at the given real address, and
   2948 * be of the given num entries.  Num entries must be a power of two.
   2949 * The real address base of the queue must be aligned on the queue
   2950 * size.  Each queue entry is 64-bytes, so for example, a 32 entry
   2951 * queue must be aligned on a 2048 byte real address boundary.
   2952 *
 * The endpoint's receive queue is un-configured if num entries is zero.
   2954 *
   2955 * If a valid receive queue is specified for a local endpoint the LDC is
   2956 * in the up state for the purpose of transmission to this endpoint.
   2957 *
   2958 * The maximum number of entries for each queue for a specific cpu may be
   2959 * determined from the machine description.
   2960 *
   2961 * As receive queue configuration causes a reset of the queue's head and
 * tail pointers there is no way for a guest to determine how many entries
   2963 * have been received between a preceding ldc_get_rx_state() API call
   2964 * and the completion of the configuration operation.  It should be noted
   2965 * that datagram delivery is not guaranteed via domain channels anyway,
   2966 * and therefore any higher protocol should be resilient to datagram
   2967 * loss if necessary.  However, to overcome this specific race potential
   2968 * it is recommended, for example, that a higher level protocol be employed
   2969 * to ensure either retransmission, or ensure that no datagrams are pending
   2970 * on the peer endpoint's transmit queue prior to the configuration process.
   2971 */
   2972#define HV_FAST_LDC_RX_QCONF		0xe4
   2973
   2974/* ldc_rx_qinfo()
   2975 * TRAP:	HV_FAST_TRAP
   2976 * FUNCTION:	HV_FAST_LDC_RX_QINFO
   2977 * ARG0:	channel ID
   2978 * RET0:	status
   2979 * RET1:	real address base of queue
   2980 * RET2:	num entries in queue
   2981 *
   2982 * Return the configuration info for the receive queue of LDC endpoint
   2983 * defined by the given channel ID.  The real address is the currently
   2984 * defined real address base of the defined queue, and num entries is the
   2985 * size of the queue in terms of number of entries.
   2986 *
   2987 * If the specified channel ID is a valid endpoint number, but no receive
   2988 * queue has been defined this service will return success, but with num
   2989 * entries set to zero and the real address will have an undefined value.
   2990 */
   2991#define HV_FAST_LDC_RX_QINFO		0xe5
   2992
   2993/* ldc_rx_get_state()
   2994 * TRAP:	HV_FAST_TRAP
   2995 * FUNCTION:	HV_FAST_LDC_RX_GET_STATE
   2996 * ARG0:	channel ID
   2997 * RET0:	status
   2998 * RET1:	head offset
   2999 * RET2:	tail offset
   3000 * RET3:	channel state
   3001 *
   3002 * Return the receive state, and the head and tail queue pointers, for
   3003 * the receive queue of the LDC endpoint defined by the given channel ID.
   3004 * The head and tail values are the byte offset of the head and tail
   3005 * positions of the receive queue for the specified endpoint.
   3006 */
   3007#define HV_FAST_LDC_RX_GET_STATE	0xe6
   3008
   3009/* ldc_rx_set_qhead()
   3010 * TRAP:	HV_FAST_TRAP
   3011 * FUNCTION:	HV_FAST_LDC_RX_SET_QHEAD
   3012 * ARG0:	channel ID
   3013 * ARG1:	head offset
   3014 * RET0:	status
   3015 *
   3016 * Update the head pointer for the receive queue associated with the LDC
   3017 * endpoint defined by the given channel ID.  The head offset specified
   3018 * must be aligned on a 64 byte boundary, and calculated so as to decrease
   3019 * the number of pending entries on the receive queue.  Any attempt to
 * increase the number of pending receive queue entries is considered
   3021 * an invalid head offset and will result in an EINVAL error.
   3022 *
   3023 * The receive queue may be flushed by setting the head offset equal
   3024 * to the current tail offset.
   3025 */
   3026#define HV_FAST_LDC_RX_SET_QHEAD	0xe7
   3027
/* LDC Map Table Entry.  Each slot is defined by a translation table
 * entry, as specified by the LDC_MTE_* bits below, and a 64-bit
 * hypervisor invalidation cookie.
 */
#define LDC_MTE_PADDR	0x0fffffffffffe000 /* pa[55:13]          */
#define LDC_MTE_COPY_W	0x0000000000000400 /* copy write access  */
#define LDC_MTE_COPY_R	0x0000000000000200 /* copy read access   */
#define LDC_MTE_IOMMU_W	0x0000000000000100 /* IOMMU write access */
#define LDC_MTE_IOMMU_R	0x0000000000000080 /* IOMMU read access  */
#define LDC_MTE_EXEC	0x0000000000000040 /* execute            */
#define LDC_MTE_WRITE	0x0000000000000020 /* write              */
#define LDC_MTE_READ	0x0000000000000010 /* read               */
#define LDC_MTE_SZALL	0x000000000000000f /* page size bits     */
#define LDC_MTE_SZ16GB	0x0000000000000007 /* 16GB page          */
#define LDC_MTE_SZ2GB	0x0000000000000006 /* 2GB page           */
#define LDC_MTE_SZ256MB	0x0000000000000005 /* 256MB page         */
#define LDC_MTE_SZ32MB	0x0000000000000004 /* 32MB page          */
#define LDC_MTE_SZ4MB	0x0000000000000003 /* 4MB page           */
#define LDC_MTE_SZ512K	0x0000000000000002 /* 512K page          */
#define LDC_MTE_SZ64K	0x0000000000000001 /* 64K page           */
#define LDC_MTE_SZ8K	0x0000000000000000 /* 8K page            */
   3049
#ifndef __ASSEMBLY__
/* One slot of an LDC map table, as registered with the hypervisor via
 * ldc_set_map_table() below.
 */
struct ldc_mtable_entry {
	unsigned long	mte;	/* translation table entry (LDC_MTE_* bits) */
	unsigned long	cookie;	/* 64-bit hypervisor invalidation cookie */
};
#endif
   3056
   3057/* ldc_set_map_table()
   3058 * TRAP:	HV_FAST_TRAP
   3059 * FUNCTION:	HV_FAST_LDC_SET_MAP_TABLE
   3060 * ARG0:	channel ID
   3061 * ARG1:	table real address
   3062 * ARG2:	num entries
   3063 * RET0:	status
   3064 *
   3065 * Register the MTE table at the given table real address, with the
   3066 * specified num entries, for the LDC indicated by the given channel
   3067 * ID.
   3068 */
   3069#define HV_FAST_LDC_SET_MAP_TABLE	0xea
   3070
   3071/* ldc_get_map_table()
   3072 * TRAP:	HV_FAST_TRAP
   3073 * FUNCTION:	HV_FAST_LDC_GET_MAP_TABLE
   3074 * ARG0:	channel ID
   3075 * RET0:	status
   3076 * RET1:	table real address
   3077 * RET2:	num entries
   3078 *
   3079 * Return the configuration of the current mapping table registered
   3080 * for the given channel ID.
   3081 */
   3082#define HV_FAST_LDC_GET_MAP_TABLE	0xeb
   3083
   3084#define LDC_COPY_IN	0
   3085#define LDC_COPY_OUT	1
   3086
   3087/* ldc_copy()
   3088 * TRAP:	HV_FAST_TRAP
   3089 * FUNCTION:	HV_FAST_LDC_COPY
   3090 * ARG0:	channel ID
   3091 * ARG1:	LDC_COPY_* direction code
   3092 * ARG2:	target real address
   3093 * ARG3:	local real address
   3094 * ARG4:	length in bytes
   3095 * RET0:	status
   3096 * RET1:	actual length in bytes
   3097 */
   3098#define HV_FAST_LDC_COPY		0xec
   3099
   3100#define LDC_MEM_READ	1
   3101#define LDC_MEM_WRITE	2
   3102#define LDC_MEM_EXEC	4
   3103
   3104/* ldc_mapin()
   3105 * TRAP:	HV_FAST_TRAP
   3106 * FUNCTION:	HV_FAST_LDC_MAPIN
   3107 * ARG0:	channel ID
   3108 * ARG1:	cookie
   3109 * RET0:	status
   3110 * RET1:	real address
   3111 * RET2:	LDC_MEM_* permissions
   3112 */
   3113#define HV_FAST_LDC_MAPIN		0xed
   3114
   3115/* ldc_unmap()
   3116 * TRAP:	HV_FAST_TRAP
   3117 * FUNCTION:	HV_FAST_LDC_UNMAP
   3118 * ARG0:	real address
   3119 * RET0:	status
   3120 */
   3121#define HV_FAST_LDC_UNMAP		0xee
   3122
   3123/* ldc_revoke()
   3124 * TRAP:	HV_FAST_TRAP
   3125 * FUNCTION:	HV_FAST_LDC_REVOKE
   3126 * ARG0:	channel ID
   3127 * ARG1:	cookie
   3128 * ARG2:	ldc_mtable_entry cookie
   3129 * RET0:	status
   3130 */
   3131#define HV_FAST_LDC_REVOKE		0xef
   3132
#ifndef __ASSEMBLY__
/* C entry points for the LDC hypervisor services defined above
 * (HV_FAST_LDC_*).  Each wrapper returns the hypervisor status word
 * (RET0); any further return values (queue addresses, offsets, channel
 * state, etc.) are stored through the pointer arguments, mirroring the
 * RET1..RET3 registers documented with each service.
 */
unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
				 unsigned long ra,
				 unsigned long num_entries);
unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
				 unsigned long *ra,
				 unsigned long *num_entries);
unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
				     unsigned long *head_off,
				     unsigned long *tail_off,
				     unsigned long *chan_state);
unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
				     unsigned long tail_off);
unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
				 unsigned long ra,
				 unsigned long num_entries);
unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
				 unsigned long *ra,
				 unsigned long *num_entries);
unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
				     unsigned long *head_off,
				     unsigned long *tail_off,
				     unsigned long *chan_state);
unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
				     unsigned long head_off);
unsigned long sun4v_ldc_set_map_table(unsigned long channel,
				      unsigned long ra,
				      unsigned long num_entries);
unsigned long sun4v_ldc_get_map_table(unsigned long channel,
				      unsigned long *ra,
				      unsigned long *num_entries);
unsigned long sun4v_ldc_copy(unsigned long channel,
			     unsigned long dir_code,
			     unsigned long tgt_raddr,
			     unsigned long lcl_raddr,
			     unsigned long len,
			     unsigned long *actual_len);
unsigned long sun4v_ldc_mapin(unsigned long channel,
			      unsigned long cookie,
			      unsigned long *ra,
			      unsigned long *perm);
unsigned long sun4v_ldc_unmap(unsigned long ra);
unsigned long sun4v_ldc_revoke(unsigned long channel,
			       unsigned long cookie,
			       unsigned long mte_cookie);
#endif
   3179
   3180/* Performance counter services.  */
   3181
   3182#define HV_PERF_JBUS_PERF_CTRL_REG	0x00
   3183#define HV_PERF_JBUS_PERF_CNT_REG	0x01
   3184#define HV_PERF_DRAM_PERF_CTRL_REG_0	0x02
   3185#define HV_PERF_DRAM_PERF_CNT_REG_0	0x03
   3186#define HV_PERF_DRAM_PERF_CTRL_REG_1	0x04
   3187#define HV_PERF_DRAM_PERF_CNT_REG_1	0x05
   3188#define HV_PERF_DRAM_PERF_CTRL_REG_2	0x06
   3189#define HV_PERF_DRAM_PERF_CNT_REG_2	0x07
   3190#define HV_PERF_DRAM_PERF_CTRL_REG_3	0x08
   3191#define HV_PERF_DRAM_PERF_CNT_REG_3	0x09
   3192
   3193/* get_perfreg()
   3194 * TRAP:	HV_FAST_TRAP
   3195 * FUNCTION:	HV_FAST_GET_PERFREG
   3196 * ARG0:	performance reg number
   3197 * RET0:	status
   3198 * RET1:	performance reg value
   3199 * ERRORS:	EINVAL		Invalid performance register number
   3200 *		ENOACCESS	No access allowed to performance counters
   3201 *
   3202 * Read the value of the given DRAM/JBUS performance counter/control register.
   3203 */
   3204#define HV_FAST_GET_PERFREG		0x100
   3205
   3206/* set_perfreg()
   3207 * TRAP:	HV_FAST_TRAP
   3208 * FUNCTION:	HV_FAST_SET_PERFREG
   3209 * ARG0:	performance reg number
   3210 * ARG1:	performance reg value
   3211 * RET0:	status
   3212 * ERRORS:	EINVAL		Invalid performance register number
   3213 *		ENOACCESS	No access allowed to performance counters
   3214 *
   3215 * Write the given performance reg value to the given DRAM/JBUS
   3216 * performance counter/control register.
   3217 */
   3218#define HV_FAST_SET_PERFREG		0x101
   3219
   3220#define HV_N2_PERF_SPARC_CTL		0x0
   3221#define HV_N2_PERF_DRAM_CTL0		0x1
   3222#define HV_N2_PERF_DRAM_CNT0		0x2
   3223#define HV_N2_PERF_DRAM_CTL1		0x3
   3224#define HV_N2_PERF_DRAM_CNT1		0x4
   3225#define HV_N2_PERF_DRAM_CTL2		0x5
   3226#define HV_N2_PERF_DRAM_CNT2		0x6
   3227#define HV_N2_PERF_DRAM_CTL3		0x7
   3228#define HV_N2_PERF_DRAM_CNT3		0x8
   3229
   3230#define HV_FAST_N2_GET_PERFREG		0x104
   3231#define HV_FAST_N2_SET_PERFREG		0x105
   3232
#ifndef __ASSEMBLY__
/* Wrappers for the Niagara (HV_FAST_GET/SET_PERFREG) and Niagara-2
 * (HV_FAST_N2_GET/SET_PERFREG) performance register services above.
 * 'reg' is one of the HV_PERF_* / HV_N2_PERF_* register numbers;
 * the get variants store the register value through 'val'.
 */
unsigned long sun4v_niagara_getperf(unsigned long reg,
				    unsigned long *val);
unsigned long sun4v_niagara_setperf(unsigned long reg,
				    unsigned long val);
unsigned long sun4v_niagara2_getperf(unsigned long reg,
				     unsigned long *val);
unsigned long sun4v_niagara2_setperf(unsigned long reg,
				     unsigned long val);
#endif
   3243
   3244/* MMU statistics services.
   3245 *
   3246 * The hypervisor maintains MMU statistics and privileged code provides
   3247 * a buffer where these statistics can be collected.  It is continually
   3248 * updated once configured.  The layout is as follows:
   3249 */
   3250#ifndef __ASSEMBLY__
/* Layout of the MMU statistics buffer continually updated by the
 * hypervisor once registered via mmustat_conf() below.  Field order
 * and the __reservedN padding are dictated by the hypervisor's
 * buffer layout -- do not reorder or resize anything here.
 *
 * Counters come in hit/tick pairs, one pair per TTE page size:
 * first the I-MMU, then the D-MMU, each split into a context-0
 * section and a non-zero-context section.
 */
struct hv_mmu_statistics {
	/* I-MMU, context 0 */
	unsigned long immu_tsb_hits_ctx0_8k_tte;
	unsigned long immu_tsb_ticks_ctx0_8k_tte;
	unsigned long immu_tsb_hits_ctx0_64k_tte;
	unsigned long immu_tsb_ticks_ctx0_64k_tte;
	unsigned long __reserved1[2];
	unsigned long immu_tsb_hits_ctx0_4mb_tte;
	unsigned long immu_tsb_ticks_ctx0_4mb_tte;
	unsigned long __reserved2[2];
	unsigned long immu_tsb_hits_ctx0_256mb_tte;
	unsigned long immu_tsb_ticks_ctx0_256mb_tte;
	unsigned long __reserved3[4];
	/* I-MMU, non-zero contexts */
	unsigned long immu_tsb_hits_ctxnon0_8k_tte;
	unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
	unsigned long immu_tsb_hits_ctxnon0_64k_tte;
	unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
	unsigned long __reserved4[2];
	unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
	unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
	unsigned long __reserved5[2];
	unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
	unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
	unsigned long __reserved6[4];
	/* D-MMU, context 0 */
	unsigned long dmmu_tsb_hits_ctx0_8k_tte;
	unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
	unsigned long dmmu_tsb_hits_ctx0_64k_tte;
	unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
	unsigned long __reserved7[2];
	unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
	unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
	unsigned long __reserved8[2];
	unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
	unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
	unsigned long __reserved9[4];
	/* D-MMU, non-zero contexts */
	unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
	unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
	unsigned long __reserved10[2];
	unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
	unsigned long __reserved11[2];
	unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
	unsigned long __reserved12[4];
};
   3297#endif
   3298
   3299/* mmustat_conf()
   3300 * TRAP:	HV_FAST_TRAP
   3301 * FUNCTION:	HV_FAST_MMUSTAT_CONF
   3302 * ARG0:	real address
   3303 * RET0:	status
   3304 * RET1:	real address
   3305 * ERRORS:	ENORADDR	Invalid real address
   3306 *		EBADALIGN	Real address not aligned on 64-byte boundary
   3307 *		EBADTRAP	API not supported on this processor
   3308 *
   3309 * Enable MMU statistic gathering using the buffer at the given real
   3310 * address on the current virtual CPU.  The new buffer real address
   3311 * is given in ARG1, and the previously specified buffer real address
   3312 * is returned in RET1, or is returned as zero for the first invocation.
   3313 *
   3314 * If the passed in real address argument is zero, this will disable
   3315 * MMU statistic collection on the current virtual CPU.  If an error is
   3316 * returned then no statistics are collected.
   3317 *
   3318 * The buffer contents should be initialized to all zeros before being
   3319 * given to the hypervisor or else the statistics will be meaningless.
   3320 */
   3321#define HV_FAST_MMUSTAT_CONF		0x102
   3322
   3323/* mmustat_info()
   3324 * TRAP:	HV_FAST_TRAP
   3325 * FUNCTION:	HV_FAST_MMUSTAT_INFO
   3326 * RET0:	status
   3327 * RET1:	real address
   3328 * ERRORS:	EBADTRAP	API not supported on this processor
   3329 *
   3330 * Return the current state and real address of the currently configured
   3331 * MMU statistics buffer on the current virtual CPU.
   3332 */
   3333#define HV_FAST_MMUSTAT_INFO		0x103
   3334
#ifndef __ASSEMBLY__
/* Wrappers for mmustat_conf()/mmustat_info() above.  The buffer real
 * address returned by the hypervisor in RET1 is stored through the
 * pointer argument; the hypervisor status is returned directly.
 */
unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
unsigned long sun4v_mmustat_info(unsigned long *ra);
#endif
   3339
   3340/* NCS crypto services  */
   3341
   3342/* ncs_request() sub-function numbers */
   3343#define HV_NCS_QCONF			0x01
   3344#define HV_NCS_QTAIL_UPDATE		0x02
   3345
   3346#ifndef __ASSEMBLY__
/* One entry of the MAU work queue registered with the hypervisor via
 * the HV_NCS_QCONF sub-function (see ncs_request() below).  Each
 * entry is an image of the MAU register block to program.
 */
struct hv_ncs_queue_entry {
	/* MAU Control Register */
	unsigned long	mau_control;
#define MAU_CONTROL_INV_PARITY	0x0000000000002000
#define MAU_CONTROL_STRAND	0x0000000000001800
#define MAU_CONTROL_BUSY	0x0000000000000400
#define MAU_CONTROL_INT		0x0000000000000200
#define MAU_CONTROL_OP		0x00000000000001c0
#define MAU_CONTROL_OP_SHIFT	6
#define MAU_OP_LOAD_MA_MEMORY	0x0
#define MAU_OP_STORE_MA_MEMORY	0x1
#define MAU_OP_MODULAR_MULT	0x2
#define MAU_OP_MODULAR_REDUCE	0x3
#define MAU_OP_MODULAR_EXP_LOOP	0x4
#define MAU_CONTROL_LEN		0x000000000000003f
#define MAU_CONTROL_LEN_SHIFT	0

	/* Real address of the data to be loaded into, or stored out
	 * of, the MAU.
	 */
	unsigned long	mau_mpa;

	/* Modular Arithmetic MA Offset Register.  */
	unsigned long	mau_ma;

	/* Modular Arithmetic N Prime Register.  */
	unsigned long	mau_np;
};
   3375
/* Argument block for the HV_NCS_QCONF sub-function of ncs_request():
 * registers (or defines) an MAU operation queue with the hypervisor.
 */
struct hv_ncs_qconf_arg {
	unsigned long	mid;      /* MAU ID, 1 per core on Niagara */
	unsigned long	base;     /* Real address base of queue */
	unsigned long	end;	  /* Real address end of queue */
	unsigned long	num_ents; /* Number of entries in queue */
};
   3382
/* Argument block for the HV_NCS_QTAIL_UPDATE sub-function of
 * ncs_request(): advances the queue tail so the hypervisor runs the
 * newly appended entries.  'tail' is an entry index, not a byte
 * offset (see the ncs_request() comment below).
 */
struct hv_ncs_qtail_update_arg {
	unsigned long	mid;      /* MAU ID, 1 per core on Niagara */
	unsigned long	tail;     /* New tail index to use */
	unsigned long	syncflag; /* only SYNCFLAG_SYNC is implemented */
#define HV_NCS_SYNCFLAG_SYNC	0x00
#define HV_NCS_SYNCFLAG_ASYNC	0x01
};
   3390#endif
   3391
   3392/* ncs_request()
   3393 * TRAP:	HV_FAST_TRAP
   3394 * FUNCTION:	HV_FAST_NCS_REQUEST
   3395 * ARG0:	NCS sub-function
   3396 * ARG1:	sub-function argument real address
   3397 * ARG2:	size in bytes of sub-function argument
   3398 * RET0:	status
   3399 *
   3400 * The MAU chip of the Niagara processor is not directly accessible
   3401 * to privileged code, instead it is programmed indirectly via this
   3402 * hypervisor API.
   3403 *
 * The interface defines a queue of MAU operations to perform.
   3405 * Privileged code registers a queue with the hypervisor by invoking
   3406 * this HVAPI with the HV_NCS_QCONF sub-function, which defines the
   3407 * base, end, and number of entries of the queue.  Each queue entry
   3408 * contains a MAU register struct block.
   3409 *
   3410 * The privileged code then proceeds to add entries to the queue and
   3411 * then invoke the HV_NCS_QTAIL_UPDATE sub-function.  Since only
   3412 * synchronous operations are supported by the current hypervisor,
   3413 * HV_NCS_QTAIL_UPDATE will run all the pending queue entries to
   3414 * completion and return HV_EOK, or return an error code.
   3415 *
   3416 * The real address of the sub-function argument must be aligned on at
   3417 * least an 8-byte boundary.
   3418 *
   3419 * The tail argument of HV_NCS_QTAIL_UPDATE is an index, not a byte
 * offset, into the queue and must be less than or equal to the 'num_ents'
   3421 * argument given in the HV_NCS_QCONF call.
   3422 */
   3423#define HV_FAST_NCS_REQUEST		0x110
   3424
#ifndef __ASSEMBLY__
/* Wrapper for ncs_request() above.  'request' is an HV_NCS_* sub-
 * function number; arg_ra/arg_size give the real address and size of
 * the corresponding argument block (hv_ncs_qconf_arg or
 * hv_ncs_qtail_update_arg).
 */
unsigned long sun4v_ncs_request(unsigned long request,
			        unsigned long arg_ra,
			        unsigned long arg_size);
#endif
   3430
   3431#define HV_FAST_FIRE_GET_PERFREG	0x120
   3432#define HV_FAST_FIRE_SET_PERFREG	0x121
   3433
   3434#define HV_FAST_REBOOT_DATA_SET		0x172
   3435
#ifndef __ASSEMBLY__
/* Wrapper for HV_FAST_REBOOT_DATA_SET: ra/len describe the reboot
 * data buffer handed to the hypervisor.
 */
unsigned long sun4v_reboot_data_set(unsigned long ra,
				    unsigned long len);
#endif
   3440
   3441#define HV_FAST_VT_GET_PERFREG		0x184
   3442#define HV_FAST_VT_SET_PERFREG		0x185
   3443
#ifndef __ASSEMBLY__
/* Wrappers for the VT performance register services
 * (HV_FAST_VT_GET/SET_PERFREG) above.
 */
unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
				   unsigned long *reg_val);
unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
				   unsigned long reg_val);
#endif
   3450
   3451#define	HV_FAST_T5_GET_PERFREG		0x1a8
   3452#define	HV_FAST_T5_SET_PERFREG		0x1a9
   3453
#ifndef	__ASSEMBLY__
/* Wrappers for the T5 performance register services
 * (HV_FAST_T5_GET/SET_PERFREG) above.
 */
unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
				   unsigned long *reg_val);
unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
				   unsigned long reg_val);
#endif
   3460
   3461
   3462#define HV_FAST_M7_GET_PERFREG	0x43
   3463#define HV_FAST_M7_SET_PERFREG	0x44
   3464
#ifndef	__ASSEMBLY__
/* Wrappers for the M7 performance register services
 * (HV_FAST_M7_GET/SET_PERFREG) above.  Note the M7 function numbers
 * (0x43/0x44) live in their own API group (HV_GRP_M7_PERF), which is
 * presumably why they do not follow the 0x1xx numbering -- confirm
 * against the sun4v spec before relying on this.
 */
unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
				      unsigned long *reg_val);
unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
				      unsigned long reg_val);
#endif
   3471
   3472/* Function numbers for HV_CORE_TRAP.  */
   3473#define HV_CORE_SET_VER			0x00
   3474#define HV_CORE_PUTCHAR			0x01
   3475#define HV_CORE_EXIT			0x02
   3476#define HV_CORE_GET_VER			0x03
   3477
   3478/* Hypervisor API groups for use with HV_CORE_SET_VER and
   3479 * HV_CORE_GET_VER.
   3480 */
   3481#define HV_GRP_SUN4V			0x0000
   3482#define HV_GRP_CORE			0x0001
   3483#define HV_GRP_INTR			0x0002
   3484#define HV_GRP_SOFT_STATE		0x0003
   3485#define HV_GRP_TM			0x0080
   3486#define HV_GRP_PCI			0x0100
   3487#define HV_GRP_LDOM			0x0101
   3488#define HV_GRP_SVC_CHAN			0x0102
   3489#define HV_GRP_NCS			0x0103
   3490#define HV_GRP_RNG			0x0104
   3491#define HV_GRP_PBOOT			0x0105
   3492#define HV_GRP_TPM			0x0107
   3493#define HV_GRP_SDIO			0x0108
   3494#define HV_GRP_SDIO_ERR			0x0109
   3495#define HV_GRP_REBOOT_DATA		0x0110
   3496#define HV_GRP_ATU			0x0111
   3497#define HV_GRP_DAX			0x0113
   3498#define HV_GRP_M7_PERF			0x0114
   3499#define HV_GRP_NIAG_PERF		0x0200
   3500#define HV_GRP_FIRE_PERF		0x0201
   3501#define HV_GRP_N2_CPU			0x0202
   3502#define HV_GRP_NIU			0x0204
   3503#define HV_GRP_VF_CPU			0x0205
   3504#define HV_GRP_KT_CPU			0x0209
   3505#define HV_GRP_VT_CPU			0x020c
   3506#define HV_GRP_T5_CPU			0x0211
   3507#define HV_GRP_DIAG			0x0300
   3508
#ifndef __ASSEMBLY__
/* Raw wrappers for HV_CORE_GET_VER / HV_CORE_SET_VER.  'group' is one
 * of the HV_GRP_* values above; major/minor are the API version
 * numbers being queried or requested.
 */
unsigned long sun4v_get_version(unsigned long group,
			        unsigned long *major,
			        unsigned long *minor);
unsigned long sun4v_set_version(unsigned long group,
			        unsigned long major,
			        unsigned long minor,
			        unsigned long *actual_minor);

/* Higher-level helpers for registering, releasing and querying
 * hypervisor API group versions (implemented elsewhere in the
 * kernel; note these return int/void, not the raw hypervisor
 * status).
 */
int sun4v_hvapi_register(unsigned long group, unsigned long major,
			 unsigned long *minor);
void sun4v_hvapi_unregister(unsigned long group);
int sun4v_hvapi_get(unsigned long group,
		    unsigned long *major,
		    unsigned long *minor);
void sun4v_hvapi_init(void);
#endif
   3526
   3527#endif /* !(_SPARC64_HYPERVISOR_H) */