cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lock_events_list.h (3159B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif
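
/*
 * X-macro list: an includer may redefine LOCK_EVENT before including
 * this file to generate other per-event artifacts from the single
 * list below (see the sketch after this listing); the default above
 * yields one LOCKEVENT_<name> enumerator per event.
 */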

#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 */
LOCK_EVENT(pv_hash_hops)	/* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)	/* # of vCPU kicks issued at unlock time   */
LOCK_EVENT(pv_kick_wake)	/* # of vCPU kicks for pv_latency_wake	   */
LOCK_EVENT(pv_latency_kick)	/* Average latency (ns) of vCPU kick	   */
LOCK_EVENT(pv_latency_wake)	/* Average latency (ns) of kick-to-wakeup  */
LOCK_EVENT(pv_lock_stealing)	/* # of lock stealing operations	   */
LOCK_EVENT(pv_spurious_wakeup)	/* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)	/* # of wait's after queue head vCPU kick  */
LOCK_EVENT(pv_wait_early)	/* # of early vCPU wait's		   */
LOCK_EVENT(pv_wait_head)	/* # of vCPU wait's at the queue head	   */
LOCK_EVENT(pv_wait_node)	/* # of vCPU wait's at non-head queue node */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 */
LOCK_EVENT(lock_pending)	/* # of locking ops via pending code	     */
LOCK_EVENT(lock_slowpath)	/* # of locking ops via MCS lock queue	     */
LOCK_EVENT(lock_use_node2)	/* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3)	/* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4)	/* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node)	/* # of locking ops w/o using percpu node    */
#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * Locking events for rwsem
 */
LOCK_EVENT(rwsem_sleep_reader)	/* # of reader sleeps			*/
LOCK_EVENT(rwsem_sleep_writer)	/* # of writer sleeps			*/
LOCK_EVENT(rwsem_wake_reader)	/* # of reader wakeups			*/
LOCK_EVENT(rwsem_wake_writer)	/* # of writer wakeups			*/
LOCK_EVENT(rwsem_opt_lock)	/* # of opt-acquired write locks	*/
LOCK_EVENT(rwsem_opt_fail)	/* # of failed optspins			*/
LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optspins		*/
LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired		*/
LOCK_EVENT(rwsem_rlock_steal)	/* # of read locks by lock stealing	*/
LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired	*/
LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions	*/
LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs		*/
LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired		*/
LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions	*/
LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs		*/
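
This header is an X-macro list: each consumer defines LOCK_EVENT to suit
itself and then includes the file, so event IDs, name strings, and counters
are all generated from the one list and stay in sync. Below is a minimal
sketch of that pattern, loosely modeled on how kernel/locking/lock_events.h
and lock_events.c consume the list; the identifiers used here (lock_events,
lockevent_num, lockevent_names) are illustrative, not necessarily the
kernel's exact ones.

	/* First expansion: the header's default LOCK_EVENT produces one
	 * LOCKEVENT_<name> enumerator per event. */
	enum lock_events {
	#include "lock_events_list.h"
		lockevent_num,		/* total number of lock events */
	};

	/* Second expansion: a display-name table indexed by event ID.
	 * Re-including the header works because only the default
	 * LOCK_EVENT definition is guarded, not the file body. */
	#undef LOCK_EVENT
	#define LOCK_EVENT(name)	[LOCKEVENT_ ## name] = #name,
	static const char * const lockevent_names[lockevent_num] = {
	#include "lock_events_list.h"
	};

With this layout, adding a single LOCK_EVENT() line to the list yields both
a new event ID and its name; in kernels built with CONFIG_LOCK_EVENT_COUNTS
the per-event counters are then exported through debugfs.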