binfmt_flat.c (26377B)
// SPDX-License-Identifier: GPL-2.0
/****************************************************************************/
/*
 * linux/fs/binfmt_flat.c
 *
 * Copyright (C) 2000-2003 David McCullough <davidm@snapgear.com>
 * Copyright (C) 2002 Greg Ungerer <gerg@snapgear.com>
 * Copyright (C) 2002 SnapGear, by Paul Dale <pauli@snapgear.com>
 * Copyright (C) 2000, 2001 Lineo, by David McCullough <davidm@lineo.com>
 * based heavily on:
 *
 * linux/fs/binfmt_aout.c:
 *	Copyright (C) 1991, 1992, 1996 Linus Torvalds
 * linux/fs/binfmt_flat.c for 2.0 kernel
 *	Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
 * JAN/99 -- coded full program relocation (gerg@snapgear.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/flat.h>

#ifndef flat_get_relocate_addr
#define flat_get_relocate_addr(rel)	(rel)
#endif

/****************************************************************************/

/*
 * User data (data section and bss) needs to be aligned.
 * We pick 0x20 here because it is the max value elf2flt has always
 * used in producing FLAT files, and because it seems to be large
 * enough to make all the gcc alignment related tests happy.
 */
#define FLAT_DATA_ALIGN	(0x20)

/*
 * User data (stack) also needs to be aligned.
 * Here we can be a bit looser than the data sections since this
 * needs to only meet arch ABI requirements.
 */
#define FLAT_STACK_ALIGN	max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN)

#define RELOC_FAILED	0xff00ff01	/* Relocation incorrect somewhere */
#define UNLOADED_LIB	0x7ff000ff	/* Placeholder for unused library */

#define MAX_SHARED_LIBS		(1)

#ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
#define DATA_START_OFFSET_WORDS		(0)
#else
#define DATA_START_OFFSET_WORDS		(MAX_SHARED_LIBS)
#endif

struct lib_info {
	struct {
		unsigned long start_code;	/* Start of text segment */
		unsigned long start_data;	/* Start of data segment */
		unsigned long start_brk;	/* End of data segment */
		unsigned long text_len;		/* Length of text segment */
		unsigned long entry;		/* Start address for this module */
		unsigned long build_date;	/* When this one was compiled */
		bool loaded;			/* Has this library been loaded? */
	} lib_list[MAX_SHARED_LIBS];
};

static int load_flat_binary(struct linux_binprm *);

static struct linux_binfmt flat_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_flat_binary,
};


/****************************************************************************/
/*
 * create_flat_tables() parses the env- and arg-strings in new user
 * memory and creates the pointer tables from them, and puts their
 * addresses on the "stack", recording the new stack pointer value.
 */
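/*
 * The resulting table, from the final stack pointer upwards (the argv
 * and envp pointer words are only present when
 * CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK is enabled):
 *
 *	sp ->	argc
 *		[pointer to argv[0]]
 *		[pointer to envp[0]]
 *		argv[0] ... argv[argc - 1], NULL
 *		envp[0] ... envp[envc - 1], NULL
 *
 * The argument and environment strings themselves sit above this,
 * starting at arg_start.
 */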

static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start)
{
	char __user *p;
	unsigned long __user *sp;
	long i, len;

	p = (char __user *)arg_start;
	sp = (unsigned long __user *)current->mm->start_stack;

	sp -= bprm->envc + 1;
	sp -= bprm->argc + 1;
	if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK))
		sp -= 2; /* argvp + envp */
	sp -= 1;  /* &argc */

	current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
	sp = (unsigned long __user *)current->mm->start_stack;

	if (put_user(bprm->argc, sp++))
		return -EFAULT;
	if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) {
		unsigned long argv, envp;
		argv = (unsigned long)(sp + 2);
		envp = (unsigned long)(sp + 2 + bprm->argc + 1);
		if (put_user(argv, sp++) || put_user(envp, sp++))
			return -EFAULT;
	}

	current->mm->arg_start = (unsigned long)p;
	for (i = bprm->argc; i > 0; i--) {
		if (put_user((unsigned long)p, sp++))
			return -EFAULT;
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = (unsigned long)p;

	current->mm->env_start = (unsigned long) p;
	for (i = bprm->envc; i > 0; i--) {
		if (put_user((unsigned long)p, sp++))
			return -EFAULT;
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = (unsigned long)p;

	return 0;
}

/****************************************************************************/

#ifdef CONFIG_BINFMT_ZFLAT

#include <linux/zlib.h>

#define LBUFSIZE	4000

/* gzip flag byte */
#define ASCII_FLAG	0x01	/* bit 0 set: file probably ASCII text */
#define CONTINUATION	0x02	/* bit 1 set: continuation of multi-part gzip file */
#define EXTRA_FIELD	0x04	/* bit 2 set: extra field present */
#define ORIG_NAME	0x08	/* bit 3 set: original file name present */
#define COMMENT		0x10	/* bit 4 set: file comment present */
#define ENCRYPTED	0x20	/* bit 5 set: file is encrypted */
#define RESERVED	0xC0	/* bit 6,7: reserved */

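/*
 * Inflate a gzip stream that starts at offset @fpos of the binary into
 * the @len byte buffer at @dst.  The gzip header is parsed by hand so
 * the optional EXTRA/ORIG_NAME/COMMENT fields can be skipped, then the
 * payload is handed to zlib_inflate() with raw windowBits (-MAX_WBITS),
 * i.e. without the zlib wrapper.  The @fd argument is unused.
 */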
static int decompress_exec(struct linux_binprm *bprm, loff_t fpos, char *dst,
		long len, int fd)
{
	unsigned char *buf;
	z_stream strm;
	int ret, retval;

	pr_debug("decompress_exec(offset=%llx,buf=%p,len=%lx)\n", fpos, dst, len);

	memset(&strm, 0, sizeof(strm));
	strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	buf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!buf) {
		retval = -ENOMEM;
		goto out_free;
	}

	/* Read in first chunk of data and parse gzip header. */
	ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);

	strm.next_in = buf;
	strm.avail_in = ret;
	strm.total_in = 0;

	retval = -ENOEXEC;

	/* Check minimum size -- gzip header */
	if (ret < 10) {
		pr_debug("file too small?\n");
		goto out_free_buf;
	}

	/* Check gzip magic number */
	if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
		pr_debug("unknown compression magic?\n");
		goto out_free_buf;
	}

	/* Check gzip method */
	if (buf[2] != 8) {
		pr_debug("unknown compression method?\n");
		goto out_free_buf;
	}
	/* Check gzip flags */
	if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
	    (buf[3] & RESERVED)) {
		pr_debug("unknown flags?\n");
		goto out_free_buf;
	}

	ret = 10;
	if (buf[3] & EXTRA_FIELD) {
		ret += 2 + buf[10] + (buf[11] << 8);
		if (unlikely(ret >= LBUFSIZE)) {
			pr_debug("buffer overflow (EXTRA)?\n");
			goto out_free_buf;
		}
	}
	if (buf[3] & ORIG_NAME) {
		while (ret < LBUFSIZE && buf[ret++] != 0)
			;
		if (unlikely(ret == LBUFSIZE)) {
			pr_debug("buffer overflow (ORIG_NAME)?\n");
			goto out_free_buf;
		}
	}
	if (buf[3] & COMMENT) {
		while (ret < LBUFSIZE && buf[ret++] != 0)
			;
		if (unlikely(ret == LBUFSIZE)) {
			pr_debug("buffer overflow (COMMENT)?\n");
			goto out_free_buf;
		}
	}

	strm.next_in += ret;
	strm.avail_in -= ret;

	strm.next_out = dst;
	strm.avail_out = len;
	strm.total_out = 0;

	if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
		pr_debug("zlib init failed?\n");
		goto out_free_buf;
	}

	while ((ret = zlib_inflate(&strm, Z_NO_FLUSH)) == Z_OK) {
		ret = kernel_read(bprm->file, buf, LBUFSIZE, &fpos);
		if (ret <= 0)
			break;
		len -= ret;

		strm.next_in = buf;
		strm.avail_in = ret;
		strm.total_in = 0;
	}

	if (ret < 0) {
		pr_debug("decompression failed (%d), %s\n",
			 ret, strm.msg);
		goto out_zlib;
	}

	retval = 0;
out_zlib:
	zlib_inflateEnd(&strm);
out_free_buf:
	kfree(buf);
out_free:
	kfree(strm.workspace);
	return retval;
}

#endif /* CONFIG_BINFMT_ZFLAT */

/****************************************************************************/

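/*
 * Translate an offset into the flat image (as found in a GOT slot or a
 * relocation target) into a run-time address.  Offsets below text_len
 * fall in the text segment and are rebased on start_code; anything else
 * falls in data/bss and is rebased on start_data.  Offsets beyond the
 * end of bss are rejected and the process is sent SIGSEGV.
 *
 * For example, with text_len == 0x1000 an offset of 0x20 resolves to
 * start_code + 0x20, while 0x1010 resolves to start_data + 0x10.
 */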
static unsigned long
calc_reloc(unsigned long r, struct lib_info *p)
{
	unsigned long addr;
	unsigned long start_brk;
	unsigned long start_data;
	unsigned long text_len;
	unsigned long start_code;

	start_brk = p->lib_list[0].start_brk;
	start_data = p->lib_list[0].start_data;
	start_code = p->lib_list[0].start_code;
	text_len = p->lib_list[0].text_len;

	if (r > start_brk - start_data + text_len) {
		pr_err("reloc outside program 0x%lx (0 - 0x%lx/0x%lx)",
		       r, start_brk-start_data+text_len, text_len);
		goto failed;
	}

	if (r < text_len)			/* In text segment */
		addr = r + start_code;
	else					/* In data segment */
		addr = r - text_len + start_data;

	/* Range checked already above so doing the range tests is redundant...*/
	return addr;

failed:
	pr_cont(", killing %s!\n", current->comm);
	send_sig(SIGSEGV, current, 0);

	return RELOC_FAILED;
}

/****************************************************************************/

#ifdef CONFIG_BINFMT_FLAT_OLD
static void old_reloc(unsigned long rl)
{
	static const char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
	flat_v2_reloc_t r;
	unsigned long __user *ptr;
	unsigned long val;

	r.value = rl;
#if defined(CONFIG_COLDFIRE)
	ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);
#else
	ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);
#endif
	get_user(val, ptr);

	pr_debug("Relocation of variable at DATASEG+%x "
		 "(address %p, currently %lx) into segment %s\n",
		 r.reloc.offset, ptr, val, segment[r.reloc.type]);

	switch (r.reloc.type) {
	case OLD_FLAT_RELOC_TYPE_TEXT:
		val += current->mm->start_code;
		break;
	case OLD_FLAT_RELOC_TYPE_DATA:
		val += current->mm->start_data;
		break;
	case OLD_FLAT_RELOC_TYPE_BSS:
		val += current->mm->end_data;
		break;
	default:
		pr_err("Unknown relocation type=%x\n", r.reloc.type);
		break;
	}
	put_user(val, ptr);

	pr_debug("Relocation became %lx\n", val);
}
#endif /* CONFIG_BINFMT_FLAT_OLD */

/****************************************************************************/

static inline u32 __user *skip_got_header(u32 __user *rp)
{
	if (IS_ENABLED(CONFIG_RISCV)) {
		/*
		 * RISC-V has a 16 byte GOT PLT header for elf64-riscv
		 * and 8 byte GOT PLT header for elf32-riscv.
		 * Skip the whole GOT PLT header, since it is reserved
		 * for the dynamic linker (ld.so).
		 */
		u32 rp_val0, rp_val1;

		if (get_user(rp_val0, rp))
			return rp;
		if (get_user(rp_val1, rp + 1))
			return rp;

		if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff)
			rp += 4;
		else if (rp_val0 == 0xffffffff)
			rp += 2;
	}
	return rp;
}

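/*
 * Load the flat binary described by @bprm: validate the bFLT header,
 * map or copy text, data and bss into memory (decompressing ZFLAT
 * images as needed), process the GOT and the relocation records, and
 * record the resulting addresses in @libinfo.  On entry *extra_stack
 * holds the extra stack space needed for the argument strings; on exit
 * it holds the total stack size to reserve.
 */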
static int load_flat_file(struct linux_binprm *bprm,
		struct lib_info *libinfo, unsigned long *extra_stack)
{
	struct flat_hdr *hdr;
	unsigned long textpos, datapos, realdatastart;
	u32 text_len, data_len, bss_len, stack_len, full_data, flags;
	unsigned long len, memp, memp_size, extra, rlim;
	__be32 __user *reloc;
	u32 __user *rp;
	int i, rev, relocs;
	loff_t fpos;
	unsigned long start_code, end_code;
	ssize_t result;
	int ret;

	hdr = ((struct flat_hdr *) bprm->buf);		/* exec-header */

	text_len = ntohl(hdr->data_start);
	data_len = ntohl(hdr->data_end) - ntohl(hdr->data_start);
	bss_len = ntohl(hdr->bss_end) - ntohl(hdr->data_end);
	stack_len = ntohl(hdr->stack_size);
	if (extra_stack) {
		stack_len += *extra_stack;
		*extra_stack = stack_len;
	}
	relocs = ntohl(hdr->reloc_count);
	flags = ntohl(hdr->flags);
	rev = ntohl(hdr->rev);
	full_data = data_len + relocs * sizeof(unsigned long);

	if (strncmp(hdr->magic, "bFLT", 4)) {
		/*
		 * Previously there was a printk here telling people
		 *   "BINFMT_FLAT: bad header magic".
		 * But for kernels that also use the ELF FD-PIC format
		 * that error message is confusing, so fail quietly instead.
		 */
		ret = -ENOEXEC;
		goto err;
	}

	if (flags & FLAT_FLAG_KTRACE)
		pr_info("Loading file: %s\n", bprm->filename);

#ifdef CONFIG_BINFMT_FLAT_OLD
	if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) {
		pr_err("bad flat file version 0x%x (supported 0x%lx and 0x%lx)\n",
		       rev, FLAT_VERSION, OLD_FLAT_VERSION);
		ret = -ENOEXEC;
		goto err;
	}

	/*
	 * fix up the flags for the older format, there were all kinds
	 * of endian hacks, this only works for the simple cases
	 */
	if (rev == OLD_FLAT_VERSION &&
	    (flags || IS_ENABLED(CONFIG_BINFMT_FLAT_OLD_ALWAYS_RAM)))
		flags = FLAT_FLAG_RAM;

#else /* CONFIG_BINFMT_FLAT_OLD */
	if (rev != FLAT_VERSION) {
		pr_err("bad flat file version 0x%x (supported 0x%lx)\n",
		       rev, FLAT_VERSION);
		ret = -ENOEXEC;
		goto err;
	}
#endif /* !CONFIG_BINFMT_FLAT_OLD */

	/*
	 * Make sure the header params are sane.
	 * 28 bits (256 MB) is way more than reasonable in this case.
	 * If some top bits are set we have probable binary corruption.
	 */
	if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
		pr_err("bad header\n");
		ret = -ENOEXEC;
		goto err;
	}

#ifndef CONFIG_BINFMT_ZFLAT
	if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
		pr_err("Support for ZFLAT executables is not enabled.\n");
		ret = -ENOEXEC;
		goto err;
	}
#endif

	/*
	 * Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (data_len + bss_len > rlim) {
		ret = -ENOMEM;
		goto err;
	}

	/* Flush all traces of the currently running executable */
	ret = begin_new_exec(bprm);
	if (ret)
		goto err;

	/* OK, This is the point of no return */
	set_personality(PER_LINUX_32BIT);
	setup_new_exec(bprm);

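	/*
	 * The mapping below must hold, after the data segment, the larger
	 * of (bss + stack) and the relocation table: the relocation
	 * records are read in right after the data (full_data includes
	 * them), processed, and that space is then zeroed and reused for
	 * bss, brk and the stack.
	 */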
	/*
	 * calculate the extra space we need to map in
	 */
	extra = max_t(unsigned long, bss_len + stack_len,
			relocs * sizeof(unsigned long));

	/*
	 * there are a couple of cases here, the separate code/data
	 * case, and then the fully copied to RAM case which lumps
	 * it all together.
	 */
	if (!IS_ENABLED(CONFIG_MMU) && !(flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP))) {
		/*
		 * this should give us a ROM ptr, but if it doesn't we don't
		 * really care
		 */
		pr_debug("ROM mapping of file (we hope)\n");

		textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
				  MAP_PRIVATE, 0);
		if (!textpos || IS_ERR_VALUE(textpos)) {
			ret = textpos;
			if (!textpos)
				ret = -ENOMEM;
			pr_err("Unable to mmap process text, errno %d\n", ret);
			goto err;
		}

		len = data_len + extra +
			DATA_START_OFFSET_WORDS * sizeof(unsigned long);
		len = PAGE_ALIGN(len);
		realdatastart = vm_mmap(NULL, 0, len,
			PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);

		if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
			ret = realdatastart;
			if (!realdatastart)
				ret = -ENOMEM;
			pr_err("Unable to allocate RAM for process data, "
			       "errno %d\n", ret);
			vm_munmap(textpos, text_len);
			goto err;
		}
		datapos = ALIGN(realdatastart +
				DATA_START_OFFSET_WORDS * sizeof(unsigned long),
				FLAT_DATA_ALIGN);

		pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
			 data_len + bss_len + stack_len, datapos);

		fpos = ntohl(hdr->data_start);
#ifdef CONFIG_BINFMT_ZFLAT
		if (flags & FLAT_FLAG_GZDATA) {
			result = decompress_exec(bprm, fpos, (char *)datapos,
						 full_data, 0);
		} else
#endif
		{
			result = read_code(bprm->file, datapos, fpos,
					   full_data);
		}
		if (IS_ERR_VALUE(result)) {
			ret = result;
			pr_err("Unable to read data+bss, errno %d\n", ret);
			vm_munmap(textpos, text_len);
			vm_munmap(realdatastart, len);
			goto err;
		}

		reloc = (__be32 __user *)
			(datapos + (ntohl(hdr->reloc_start) - text_len));
		memp = realdatastart;
		memp_size = len;
	} else {

		len = text_len + data_len + extra +
			DATA_START_OFFSET_WORDS * sizeof(u32);
		len = PAGE_ALIGN(len);
		textpos = vm_mmap(NULL, 0, len,
			PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);

		if (!textpos || IS_ERR_VALUE(textpos)) {
			ret = textpos;
			if (!textpos)
				ret = -ENOMEM;
			pr_err("Unable to allocate RAM for process text/data, "
			       "errno %d\n", ret);
			goto err;
		}

		realdatastart = textpos + ntohl(hdr->data_start);
		datapos = ALIGN(realdatastart +
				DATA_START_OFFSET_WORDS * sizeof(u32),
				FLAT_DATA_ALIGN);

		reloc = (__be32 __user *)
			(datapos + (ntohl(hdr->reloc_start) - text_len));
		memp = textpos;
		memp_size = len;
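		/*
		 * Two ZFLAT variants exist: FLAT_FLAG_GZIP means everything
		 * after the header (text and data) is a single gzip stream,
		 * while FLAT_FLAG_GZDATA means only the data segment is
		 * compressed and the text is read as-is.
		 */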
#ifdef CONFIG_BINFMT_ZFLAT
		/*
		 * load it all in and treat it like a RAM load from now on
		 */
		if (flags & FLAT_FLAG_GZIP) {
#ifndef CONFIG_MMU
			result = decompress_exec(bprm, sizeof(struct flat_hdr),
					 (((char *)textpos) + sizeof(struct flat_hdr)),
					 (text_len + full_data
						  - sizeof(struct flat_hdr)),
					 0);
			memmove((void *) datapos, (void *) realdatastart,
					full_data);
#else
			/*
			 * This is used on MMU systems mainly for testing.
			 * Let's use a kernel buffer to simplify things.
			 */
			long unz_text_len = text_len - sizeof(struct flat_hdr);
			long unz_len = unz_text_len + full_data;
			char *unz_data = vmalloc(unz_len);
			if (!unz_data) {
				result = -ENOMEM;
			} else {
				result = decompress_exec(bprm, sizeof(struct flat_hdr),
							 unz_data, unz_len, 0);
				if (result == 0 &&
				    (copy_to_user((void __user *)textpos + sizeof(struct flat_hdr),
						  unz_data, unz_text_len) ||
				     copy_to_user((void __user *)datapos,
						  unz_data + unz_text_len, full_data)))
					result = -EFAULT;
				vfree(unz_data);
			}
#endif
		} else if (flags & FLAT_FLAG_GZDATA) {
			result = read_code(bprm->file, textpos, 0, text_len);
			if (!IS_ERR_VALUE(result)) {
#ifndef CONFIG_MMU
				result = decompress_exec(bprm, text_len, (char *) datapos,
							 full_data, 0);
#else
				char *unz_data = vmalloc(full_data);
				if (!unz_data) {
					result = -ENOMEM;
				} else {
					result = decompress_exec(bprm, text_len,
								 unz_data, full_data, 0);
					if (result == 0 &&
					    copy_to_user((void __user *)datapos,
							 unz_data, full_data))
						result = -EFAULT;
					vfree(unz_data);
				}
#endif
			}
		} else
#endif /* CONFIG_BINFMT_ZFLAT */
		{
			result = read_code(bprm->file, textpos, 0, text_len);
			if (!IS_ERR_VALUE(result))
				result = read_code(bprm->file, datapos,
						   ntohl(hdr->data_start),
						   full_data);
		}
		if (IS_ERR_VALUE(result)) {
			ret = result;
			pr_err("Unable to read code+data+bss, errno %d\n", ret);
			vm_munmap(textpos, text_len + data_len + extra +
				DATA_START_OFFSET_WORDS * sizeof(u32));
			goto err;
		}
	}

	start_code = textpos + sizeof(struct flat_hdr);
	end_code = textpos + text_len;
	text_len -= sizeof(struct flat_hdr); /* the real code len */

	/* The main program needs a little extra setup in the task structure */
	current->mm->start_code = start_code;
	current->mm->end_code = end_code;
	current->mm->start_data = datapos;
	current->mm->end_data = datapos + data_len;
	/*
	 * set up the brk stuff, uses any slack left in data/bss/stack
	 * allocation.  We put the brk after the bss (between the bss
	 * and stack) like other platforms.
	 * Userspace code relies on the stack pointer starting out at
	 * an address right at the end of a page.
	 */
	current->mm->start_brk = datapos + data_len + bss_len;
	current->mm->brk = (current->mm->start_brk + 3) & ~3;
#ifndef CONFIG_MMU
	current->mm->context.end_brk = memp + memp_size - stack_len;
#endif

	if (flags & FLAT_FLAG_KTRACE) {
		pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n",
			textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
		pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n",
			"Load", bprm->filename,
			start_code, end_code, datapos, datapos + data_len,
			datapos + data_len, (datapos + data_len + bss_len + 3) & ~3);
	}

	/* Store the current module values into the global library structure */
	libinfo->lib_list[0].start_code = start_code;
	libinfo->lib_list[0].start_data = datapos;
	libinfo->lib_list[0].start_brk = datapos + data_len + bss_len;
	libinfo->lib_list[0].text_len = text_len;
	libinfo->lib_list[0].loaded = 1;
	libinfo->lib_list[0].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
	libinfo->lib_list[0].build_date = ntohl(hdr->build_date);

	/*
	 * We just load the allocations into some temporary memory to
	 * help simplify all this mumbo jumbo
	 *
	 * We've got two different sections of relocation entries.
	 * The first is the GOT which resides at the beginning of the data segment
	 * and is terminated with a -1.  This one can be relocated in place.
	 * The second is the extra relocation entries tacked after the image's
	 * data segment. These require a little more processing as the entry is
	 * really an offset into the image which contains an offset into the
	 * image.
	 */
	if (flags & FLAT_FLAG_GOTPIC) {
		rp = skip_got_header((u32 __user *) datapos);
		for (; ; rp++) {
			u32 addr, rp_val;
			if (get_user(rp_val, rp))
				return -EFAULT;
			if (rp_val == 0xffffffff)
				break;
			if (rp_val) {
				addr = calc_reloc(rp_val, libinfo);
				if (addr == RELOC_FAILED) {
					ret = -ENOEXEC;
					goto err;
				}
				if (put_user(addr, rp))
					return -EFAULT;
			}
		}
	}

	/*
	 * Now run through the relocation entries.
	 * We've got to be careful here as C++ produces relocatable zero
	 * entries in the constructor and destructor tables which are then
	 * tested for being not zero (which will always occur unless we're
	 * based from address zero).  This causes an endless loop as __start
	 * is at zero.  The solution used is to not relocate zero addresses.
	 * This has the negative side effect of not allowing a global data
	 * reference to be statically initialised to _stext (I've moved
	 * __start to address 4 so that is okay).
	 */
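	/*
	 * Each relocation record is a big-endian 32-bit value encoding the
	 * offset (within the image) of a pointer that needs fixing up; the
	 * pointer itself holds an offset into the image.  Both offsets are
	 * run through calc_reloc() and the fixed-up pointer is written back
	 * with flat_put_addr_at_rp().
	 */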
	if (rev > OLD_FLAT_VERSION) {
		for (i = 0; i < relocs; i++) {
			u32 addr, relval;
			__be32 tmp;

			/*
			 * Get the address of the pointer to be
			 * relocated (of course, the address has to be
			 * relocated first).
			 */
			if (get_user(tmp, reloc + i))
				return -EFAULT;
			relval = ntohl(tmp);
			addr = flat_get_relocate_addr(relval);
			rp = (u32 __user *)calc_reloc(addr, libinfo);
			if (rp == (u32 __user *)RELOC_FAILED) {
				ret = -ENOEXEC;
				goto err;
			}

			/* Get the pointer's value.  */
			ret = flat_get_addr_from_rp(rp, relval, flags, &addr);
			if (unlikely(ret))
				goto err;

			if (addr != 0) {
				/*
				 * Do the relocation.  PIC relocs in the data section are
				 * already in target order
				 */
				if ((flags & FLAT_FLAG_GOTPIC) == 0) {
					/*
					 * Meh, the same value can have a different
					 * byte order based on a flag..
					 */
					addr = ntohl((__force __be32)addr);
				}
				addr = calc_reloc(addr, libinfo);
				if (addr == RELOC_FAILED) {
					ret = -ENOEXEC;
					goto err;
				}

				/* Write back the relocated pointer.  */
				ret = flat_put_addr_at_rp(rp, addr, relval);
				if (unlikely(ret))
					goto err;
			}
		}
#ifdef CONFIG_BINFMT_FLAT_OLD
	} else {
		for (i = 0; i < relocs; i++) {
			__be32 relval;
			if (get_user(relval, reloc + i))
				return -EFAULT;
			old_reloc(ntohl(relval));
		}
#endif /* CONFIG_BINFMT_FLAT_OLD */
	}

	flush_icache_user_range(start_code, end_code);

	/* zero the BSS, BRK and stack areas */
	if (clear_user((void __user *)(datapos + data_len), bss_len +
		       (memp + memp_size - stack_len -		/* end brk */
		       libinfo->lib_list[0].start_brk) +	/* start brk */
		       stack_len))
		return -EFAULT;

	return 0;
err:
	return ret;
}


/****************************************************************************/

/*
 * These are the functions used to load flat style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

static int load_flat_binary(struct linux_binprm *bprm)
{
	struct lib_info libinfo;
	struct pt_regs *regs = current_pt_regs();
	unsigned long stack_len = 0;
	unsigned long start_addr;
	int res;
	int i, j;

	memset(&libinfo, 0, sizeof(libinfo));

	/*
	 * We have to add the size of our arguments to our stack size
	 * otherwise it's too easy for users to create stack overflows
	 * by passing in a huge argument list.  And yes, we have to be
	 * pedantic and include space for the argv/envp array as it may have
	 * a lot of entries.
	 */
#ifndef CONFIG_MMU
	stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */
#endif
	stack_len += (bprm->argc + 1) * sizeof(char *);   /* the argv array */
	stack_len += (bprm->envc + 1) * sizeof(char *);   /* the envp array */
	stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN);

	res = load_flat_file(bprm, &libinfo, &stack_len);
	if (res < 0)
		return res;

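	/*
	 * The DATA_START_OFFSET_WORDS words reserved just below each data
	 * segment hold the data-segment addresses of all "libraries", a
	 * leftover from FLAT shared library support.  With
	 * MAX_SHARED_LIBS == 1 only the program's own start_data is stored.
	 */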
	/* Update data segment pointers for all libraries */
	for (i = 0; i < MAX_SHARED_LIBS; i++) {
		if (!libinfo.lib_list[i].loaded)
			continue;
		for (j = 0; j < MAX_SHARED_LIBS; j++) {
			unsigned long val = libinfo.lib_list[j].loaded ?
				libinfo.lib_list[j].start_data : UNLOADED_LIB;
			unsigned long __user *p = (unsigned long __user *)
				libinfo.lib_list[i].start_data;
			p -= j + 1;
			if (put_user(val, p))
				return -EFAULT;
		}
	}

	set_binfmt(&flat_format);

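	/*
	 * With an MMU the stack is a normal, separately managed mapping set
	 * up by setup_arg_pages().  Without one, the stack occupies the last
	 * stack_len bytes of the program's own allocation (just past
	 * end_brk), and the argument strings are copied onto it before the
	 * tables are built.
	 */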
#ifdef CONFIG_MMU
	res = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	if (!res)
		res = create_flat_tables(bprm, bprm->p);
#else
	/* Stash our initial stack pointer into the mm structure */
	current->mm->start_stack =
		((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
	pr_debug("sp=%lx\n", current->mm->start_stack);

	/* copy the arg pages onto the stack */
	res = transfer_args_to_stack(bprm, &current->mm->start_stack);
	if (!res)
		res = create_flat_tables(bprm, current->mm->start_stack);
#endif
	if (res)
		return res;

	/* Fake some return addresses to ensure the call chain will
	 * initialise library in order for us.  We are required to call
	 * lib 1 first, then 2, ... and finally the main program (id 0).
	 */
	start_addr = libinfo.lib_list[0].entry;

#ifdef FLAT_PLAT_INIT
	FLAT_PLAT_INIT(regs);
#endif

	finalize_exec(bprm);
	pr_debug("start_thread(regs=0x%p, entry=0x%lx, start_stack=0x%lx)\n",
		 regs, start_addr, current->mm->start_stack);
	start_thread(regs, start_addr, current->mm->start_stack);

	return 0;
}

/****************************************************************************/

static int __init init_flat_binfmt(void)
{
	register_binfmt(&flat_format);
	return 0;
}
core_initcall(init_flat_binfmt);

/****************************************************************************/