trace_probe_tmpl.h (5660B)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Traceprobe fetch helper inlines
 */

static nokprobe_inline void
fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
{
	switch (code->size) {
	case 1:
		*(u8 *)buf = (u8)val;
		break;
	case 2:
		*(u16 *)buf = (u16)val;
		break;
	case 4:
		*(u32 *)buf = (u32)val;
		break;
	case 8:
		//TBD: 32bit signed
		*(u64 *)buf = (u64)val;
		break;
	default:
		*(unsigned long *)buf = val;
	}
}

static nokprobe_inline void
fetch_apply_bitfield(struct fetch_insn *code, void *buf)
{
	switch (code->basesize) {
	case 1:
		*(u8 *)buf <<= code->lshift;
		*(u8 *)buf >>= code->rshift;
		break;
	case 2:
		*(u16 *)buf <<= code->lshift;
		*(u16 *)buf >>= code->rshift;
		break;
	case 4:
		*(u32 *)buf <<= code->lshift;
		*(u32 *)buf >>= code->rshift;
		break;
	case 8:
		*(u64 *)buf <<= code->lshift;
		*(u64 *)buf >>= code->rshift;
		break;
	}
}

/*
 * These functions must be defined for each callsite.
 * Return consumed dynamic data size (>= 0), or error (< 0).
 * If dest is NULL, don't store result and return required dynamic data size.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec,
		   void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base);
static nokprobe_inline int fetch_store_strlen_user(unsigned long addr);
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base);
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size);
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size);

/* From the 2nd stage, routine is same */
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
			  void *dest, void *base)
{
	struct fetch_insn *s3 = NULL;
	int total = 0, ret = 0, i = 0;
	u32 loc = 0;
	unsigned long lval = val;

stage2:
	/* 2nd stage: dereference memory if needed */
	do {
		if (code->op == FETCH_OP_DEREF) {
			lval = val;
			ret = probe_mem_read(&val, (void *)val + code->offset,
					     sizeof(val));
		} else if (code->op == FETCH_OP_UDEREF) {
			lval = val;
			ret = probe_mem_read_user(&val,
				 (void *)val + code->offset, sizeof(val));
		} else
			break;
		if (ret)
			return ret;
		code++;
	} while (1);

	s3 = code;
stage3:
	/* 3rd stage: store value to buffer */
	if (unlikely(!dest)) {
		if (code->op == FETCH_OP_ST_STRING) {
			ret = fetch_store_strlen(val + code->offset);
			code++;
			goto array;
		} else if (code->op == FETCH_OP_ST_USTRING) {
			ret += fetch_store_strlen_user(val + code->offset);
			code++;
			goto array;
		} else
			return -EILSEQ;
	}

	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		probe_mem_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_UMEM:
		probe_mem_read_user(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	case FETCH_OP_ST_USTRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string_user(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
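	/*
	 * Illustrative example (assumed parser behaviour, see the bitfield
	 * handling in trace_probe.c): for an argument typed as "b8@4/32"
	 * (8 bits at bit offset 4 in a 32-bit container), the parser is
	 * expected to emit FETCH_OP_MOD_BF with basesize = 4,
	 * lshift = 32 - (4 + 8) = 20 and rshift = 32 - 8 = 24, so the shift
	 * pair in fetch_apply_bitfield() leaves only the requested bits,
	 * right-aligned and zero-extended, in the stored value.
	 */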
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

array:
	/* the last stage: Loop on array */
	if (code->op == FETCH_OP_LP_ARRAY) {
		total += ret;
		if (++i < code->param) {
			code = s3;
			if (s3->op != FETCH_OP_ST_STRING &&
			    s3->op != FETCH_OP_ST_USTRING) {
				dest += s3->size;
				val += s3->size;
				goto stage3;
			}
			code--;
			val = lval + sizeof(char *);
			if (dest) {
				dest += sizeof(u32);
				*(u32 *)dest = update_data_loc(loc, ret);
			}
			goto stage2;
		}
		code++;
		ret = total;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}

/* Sum up total data length for dynamic arrays (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (unlikely(arg->dynamic)) {
			len = process_fetch_insn(arg->code, regs, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}

/* Store the value of each argument */
static nokprobe_inline void
store_trace_args(void *data, struct trace_probe *tp, void *rec,
		 int header_size, int maxlen)
{
	struct probe_arg *arg;
	void *base = data - header_size;
	void *dyndata = data + tp->size;
	u32 *dl;	/* Data location */
	int ret, i;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		dl = data + arg->offset;
		/* Point the dynamic data area if needed */
		if (unlikely(arg->dynamic))
			*dl = make_data_loc(maxlen, dyndata - base);
		ret = process_fetch_insn(arg->code, rec, dl, base);
		if (unlikely(ret < 0 && arg->dynamic)) {
			*dl = make_data_loc(0, dyndata - base);
		} else {
			dyndata += ret;
			maxlen -= ret;
		}
	}
}

static inline int
print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
		 u8 *data, void *field)
{
	void *p;
	int i, j;

	for (i = 0; i < nr_args; i++) {
		struct probe_arg *a = args + i;

		trace_seq_printf(s, " %s=", a->name);
		if (likely(!a->count)) {
			if (!a->type->print(s, data + a->offset, field))
				return -ENOMEM;
			continue;
		}
		trace_seq_putc(s, '{');
		p = data + a->offset;
		for (j = 0; j < a->count; j++) {
			if (!a->type->print(s, p, field))
				return -ENOMEM;
			trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
			p += a->type->size;
		}
	}
	return 0;
}
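The forward declarations near the top ("These functions must be defined for each
callsite") mean that every file including this template has to provide its own
fetch helpers before instantiating the template functions. Below is a minimal
sketch of the two memory readers only; it assumes a kernel recent enough to have
copy_from_kernel_nofault()/copy_from_user_nofault(), and the in-tree callsites
(trace_kprobe.c, trace_uprobe.c), which also define the string-length and
string-store helpers, remain the authoritative reference.

/*
 * Sketch only: one way a callsite might satisfy probe_mem_read() and
 * probe_mem_read_user() before #include-ing trace_probe_tmpl.h.
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	/* Non-faulting read of kernel memory */
	return copy_from_kernel_nofault(dest, src, size);
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	/* Non-faulting read of user memory */
	return copy_from_user_nofault(dest, vaddr, size);
}

The callsite defines these (plus fetch_store_strlen(), fetch_store_string() and
their _user variants, and process_fetch_insn()) first, then includes this header
so the template functions pick up those definitions.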