cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

mmap.c (23237B)


/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

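/*
 * The lock is recursive per thread: mmap_lock_count is thread-local,
 * so only the outermost mmap_lock() acquires the mutex and only the
 * matching outermost mmap_unlock() releases it.
 */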
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
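    /*
     * When target pages are smaller than host pages, the first and last
     * host pages of the range may also hold target pages that are not
     * being changed; fold their current flags into prot1 so the
     * host-level mprotect() does not revoke them.
     */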
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Map an incomplete host page: [start, end) lies within the single host
 * page starting at real_start, and target pages already mapped in that
 * host page must keep their existing protection.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h_untagged(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
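/*
 * Lowest guest address at which mmap_find_vma() starts searching when
 * the caller does not request a specific address; advanced as
 * allocations succeed so later searches skip known-used space.
 */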
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

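    /*
     * Search downwards from end_addr for a run of at least 'size' bytes
     * of unmapped target pages, wrapping around to the top of the
     * reserved region once before giving up.
     */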
    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANONYMOUS | MAP_PRIVATE;
#ifdef MAP_ALIGNED
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }
#else
    /* XXX TODO */
#endif

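    /*
     * Probe loop: ask the host kernel for an anonymous PROT_NONE mapping
     * near 'addr', then check whether the result is usable as a guest
     * address; if not, unmap it and retry from an adjusted address.
     */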
    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK)
                    >> MAP_ALIGNMENT_SHIFT);
        }
#ifdef MAP_GUARD
        if (flags & MAP_GUARD) {
            printf("MAP_GUARD ");
        }
#endif
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
#ifdef MAP_EXCL
        if (flags & MAP_EXCL) {
            printf("MAP_EXCL ");
        }
#endif
        if (flags & MAP_PRIVATE) {
            printf("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            printf("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            printf("MAP_NOCORE ");
        }
#ifdef MAP_STACK
        if (flags & MAP_STACK) {
            printf("MAP_STACK ");
        }
#endif
        printf("fd=%d offset=0x%llx\n", fd, (unsigned long long)offset);
    }
#endif

    if ((flags & MAP_ANONYMOUS) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
#ifdef MAP_STACK
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                    (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
#endif /* MAP_STACK */
#ifdef MAP_GUARD
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size.  Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address.  It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         *
         * First reserve the whole host range anonymously; for file
         * mappings, the reservation is then replaced by the real file
         * mapping at the same fixed address.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANONYMOUS : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }
#endif

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h_untagged(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

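/*
 * With a pre-reserved guest address space (reserved_va), unmapped ranges
 * are not returned to the host kernel but replaced by a PROT_NONE
 * mapping, so the reservation stays intact for later guest allocations.
 */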
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                 -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

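    /*
     * A host page at either edge of the range can only be unmapped if it
     * contains no other live target pages; otherwise keep it mapped and
     * shrink the host range to unmap.
     */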
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}