cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kmem.h (9318B)


/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

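/*
 * Shared layout for the slab allocation events (kmalloc, kmem_cache_alloc):
 * caller address, returned object, requested vs. allocated size, GFP flags.
 */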
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

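/*
 * NUMA-aware variants of the allocation events above; they additionally
 * record the node the allocation was requested for.
 */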
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

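/*
 * Freeing side: kfree records only the caller and the pointer being freed;
 * kmem_cache_free additionally records the cache name.
 */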
TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);

TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const char *name),

	TP_ARGS(call_site, ptr, name),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__string(	name,	name	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__assign_str(name, name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);

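/*
 * Page allocator events: the page is stored by pfn and converted back to a
 * struct page pointer only when the entry is printed.
 */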
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=0x%lx order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

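/*
 * The mm_page class adds a percpu_refill flag on top of pfn/order/migratetype;
 * it backs mm_page_alloc_zone_locked, which fires when pages are pulled from
 * the buddy free lists with the zone lock held.
 */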
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
		__field(	int,		percpu_refill	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
		__entry->percpu_refill	= percpu_refill;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->percpu_refill)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill)
);

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

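/*
 * Emitted when an allocation falls back to a different migratetype;
 * change_ownership records whether the pageblock's migratetype now matches
 * the allocating migratetype.
 */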
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying the mm in the rss_stat
 * tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

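/*
 * EM()/EMe() are expanded twice: first as TRACE_DEFINE_ENUM() so the enum
 * values resolve in trace output, then as the { value, "name" } pairs
 * consumed by __print_symbolic() in rss_stat below.
 */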
#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }

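/*
 * Emitted when one of an mm's RSS counters changes; size is reported in
 * bytes (count << PAGE_SHIFT) and mm_id is a hashed mm pointer.
 */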
TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
	);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
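
These events are normally consumed through tracefs (the events/kmem/ directory), but a kernel module can also attach a probe directly through the register_trace_<name>() helpers generated from this header. Below is a minimal, hypothetical sketch (not part of this repository) that counts kmalloc events; it assumes the kmalloc tracepoint is exported to modules via EXPORT_TRACEPOINT_SYMBOL, as it is in mainline mm/ code.

/* kmalloc_count.c - hypothetical example module, not part of this tree */
#include <linux/module.h>
#include <linux/atomic.h>
#include <trace/events/kmem.h>

static atomic64_t kmalloc_hits = ATOMIC64_INIT(0);

/* Probe prototype: a void *data argument followed by the TP_PROTO args. */
static void kmalloc_probe(void *data, unsigned long call_site,
			  const void *ptr, size_t bytes_req,
			  size_t bytes_alloc, gfp_t gfp_flags)
{
	/* Keep the probe allocation-free; it runs on every kmalloc(). */
	atomic64_inc(&kmalloc_hits);
}

static int __init kmalloc_count_init(void)
{
	return register_trace_kmalloc(kmalloc_probe, NULL);
}

static void __exit kmalloc_count_exit(void)
{
	unregister_trace_kmalloc(kmalloc_probe, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
	pr_info("kmalloc events observed: %lld\n",
		(long long)atomic64_read(&kmalloc_hits));
}

module_init(kmalloc_count_init);
module_exit(kmalloc_count_exit);
MODULE_LICENSE("GPL");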