From de174b573ce3cdab200a96ad484b6213c5dbef7a Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin" <mst@redhat.com>
Date: Sun, 23 Sep 2012 14:31:59 -0300
Subject: [RHEL6 qemu-kvm PATCH 5/5] kvm: kill dead KVM_UPSTREAM code

RH-Author: Michael S. Tsirkin <mst@redhat.com>
Message-id: <d735fd1091cb63e7f4a4dca131e0a168b278d54c.1348408802.git.mst@redhat.com>
Patchwork-id: 42291
O-Subject: [PATCHv2 RHEL6.4 3/3] kvm: kill dead KVM_UPSTREAM code
Bugzilla: 835101
RH-Acked-by: Xiao Wang <jasowang@redhat.com>
RH-Acked-by: Eduardo Habkost <ehabkost@redhat.com>
RH-Acked-by: Gleb Natapov <gleb@redhat.com>

The purpose of KVM_UPSTREAM was to ease merging from
qemu.git. However, such a merge would clearly be impractical
for RHEL6 at this point.
Removing this dead code makes it clearer what actually
runs, and avoids backport and cut-and-paste errors.
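
For context, each removed block follows the same pattern: code copied
from qemu.git, wrapped in a guard that this tree never defines, so it
never compiled here. An illustrative sketch (the function name is a
placeholder, not taken from any file):

    #ifdef KVM_UPSTREAM            /* never defined in RHEL6 qemu-kvm */
    static int kvm_example_upstream_only(void)
    {
        /* upstream-only implementation; dead code in this tree */
        return 0;
    }
    #endif /* KVM_UPSTREAM */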

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
 kvm-all.c         | 790 -------------------------------------------
 kvm.h             | 131 -------
 target-i386/kvm.c | 994 ------------------------------------------------------
 vl.c              |   5 -
 4 files changed, 1920 deletions(-)

diff --git a/kvm-all.c b/kvm-all.c
index 5c56e1a..06274e6 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -31,336 +31,11 @@
 #include <sys/eventfd.h>
 #endif
 
-#ifdef KVM_UPSTREAM
-/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
-#define PAGE_SIZE TARGET_PAGE_SIZE
-
-//#define DEBUG_KVM
-
-#ifdef DEBUG_KVM
-#define dprintf(fmt, ...) \
-    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define dprintf(fmt, ...) \
-    do { } while (0)
-#endif
-
-typedef struct KVMSlot
-{
-    target_phys_addr_t start_addr;
-    ram_addr_t memory_size;
-    ram_addr_t phys_offset;
-    int slot;
-    int flags;
-} KVMSlot;
-
-typedef struct kvm_dirty_log KVMDirtyLog;
-
-struct KVMState
-{
-    KVMSlot slots[32];
-    int fd;
-    int vmfd;
-    int coalesced_mmio;
-#ifdef KVM_CAP_COALESCED_MMIO
-    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
-#endif
-    bool coalesced_flush_in_progress;
-    int broken_set_mem_region;
-    int migration_log;
-    int vcpu_events;
-#ifdef KVM_CAP_SET_GUEST_DEBUG
-    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
-#endif
-    int irqchip_in_kernel;
-    int pit_in_kernel;
-    int many_ioeventfds;
-};
-
-static KVMState *kvm_state;
-
-static KVMSlot *kvm_alloc_slot(KVMState *s)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
-        /* KVM private memory slots */
-        if (i >= 8 && i < 12)
-            continue;
-        if (s->slots[i].memory_size == 0)
-            return &s->slots[i];
-    }
-
-    fprintf(stderr, "%s: no free slot available\n", __func__);
-    abort();
-}
-
-static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
-                                         target_phys_addr_t start_addr,
-                                         target_phys_addr_t end_addr)
-{
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
-        KVMSlot *mem = &s->slots[i];
-
-        if (start_addr == mem->start_addr &&
-            end_addr == mem->start_addr + mem->memory_size) {
-            return mem;
-        }
-    }
-
-    return NULL;
-}
-
-/*
- * Find overlapping slot with lowest start address
- */
-static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
-                                            target_phys_addr_t start_addr,
-                                            target_phys_addr_t end_addr)
-{
-    KVMSlot *found = NULL;
-    int i;
-
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
-        KVMSlot *mem = &s->slots[i];
-
-        if (mem->memory_size == 0 ||
-            (found && found->start_addr < mem->start_addr)) {
-            continue;
-        }
-
-        if (end_addr > mem->start_addr &&
-            start_addr < mem->start_addr + mem->memory_size) {
-            found = mem;
-        }
-    }
-
-    return found;
-}
-
-static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
-{
-    struct kvm_userspace_memory_region mem;
-
-    mem.slot = slot->slot;
-    mem.guest_phys_addr = slot->start_addr;
-    mem.memory_size = slot->memory_size;
-    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
-    mem.flags = slot->flags;
-    if (s->migration_log) {
-        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
-    }
-    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
-}
-
-static void kvm_reset_vcpu(void *opaque)
-{
-    CPUState *env = opaque;
-
-    kvm_arch_reset_vcpu(env);
-    if (kvm_arch_put_registers(env)) {
-        fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
-        abort();
-    }
-}
-#endif
-
 int kvm_irqchip_in_kernel(void)
 {
     return kvm_state->irqchip_in_kernel;
 }
 
-#ifdef KVM_UPSTREAM
-int kvm_pit_in_kernel(void)
-{
-    return kvm_state->pit_in_kernel;
-}
-
-
-int kvm_init_vcpu(CPUState *env)
-{
-    KVMState *s = kvm_state;
-    long mmap_size;
-    int ret;
-
-    dprintf("kvm_init_vcpu\n");
-
-    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
-    if (ret < 0) {
-        dprintf("kvm_create_vcpu failed\n");
-        goto err;
-    }
-
-    env->kvm_fd = ret;
-    env->kvm_state = s;
-
-    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
-    if (mmap_size < 0) {
-        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
-        goto err;
-    }
-
-    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
-                        env->kvm_fd, 0);
-    if (env->kvm_run == MAP_FAILED) {
-        ret = -errno;
-        dprintf("mmap'ing vcpu state failed\n");
-        goto err;
-    }
-
-#ifdef KVM_CAP_COALESCED_MMIO
-    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
-        s->coalesced_mmio_ring = (void *) env->kvm_run +
-		s->coalesced_mmio * PAGE_SIZE;
-#endif
-
-    ret = kvm_arch_init_vcpu(env);
-    if (ret == 0) {
-        qemu_register_reset(kvm_reset_vcpu, env);
-        kvm_arch_reset_vcpu(env);
-        ret = kvm_arch_put_registers(env);
-    }
-err:
-    return ret;
-}
-
-/*
- * dirty pages logging control
- */
-static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
-                                      ram_addr_t size, int flags, int mask)
-{
-    KVMState *s = kvm_state;
-    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
-    int old_flags;
-
-    if (mem == NULL)  {
-            fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
-                    TARGET_FMT_plx "\n", __func__, phys_addr,
-                    (target_phys_addr_t)(phys_addr + size - 1));
-            return -EINVAL;
-    }
-
-    old_flags = mem->flags;
-
-    flags = (mem->flags & ~mask) | flags;
-    mem->flags = flags;
-
-    /* If nothing changed effectively, no need to issue ioctl */
-    if (s->migration_log) {
-        flags |= KVM_MEM_LOG_DIRTY_PAGES;
-    }
-    if (flags == old_flags) {
-            return 0;
-    }
-
-    return kvm_set_user_memory_region(s, mem);
-}
-
-int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
-{
-        return kvm_dirty_pages_log_change(phys_addr, size,
-                                          KVM_MEM_LOG_DIRTY_PAGES,
-                                          KVM_MEM_LOG_DIRTY_PAGES);
-}
-
-int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
-{
-        return kvm_dirty_pages_log_change(phys_addr, size,
-                                          0,
-                                          KVM_MEM_LOG_DIRTY_PAGES);
-}
-
-int kvm_set_migration_log(int enable)
-{
-    KVMState *s = kvm_state;
-    KVMSlot *mem;
-    int i, err;
-
-    s->migration_log = enable;
-
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
-        mem = &s->slots[i];
-
-        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
-            continue;
-        }
-        err = kvm_set_user_memory_region(s, mem);
-        if (err) {
-            return err;
-        }
-    }
-    return 0;
-}
-
-static int test_le_bit(unsigned long nr, unsigned char *addr)
-{
-    return (addr[nr >> 3] >> (nr & 7)) & 1;
-}
-
-/**
- * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
- * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty().
- * This means all bits are set to dirty.
- *
- * @start_add: start of logged region.
- * @end_addr: end of logged region.
- */
-int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                   target_phys_addr_t end_addr)
-{
-    KVMState *s = kvm_state;
-    unsigned long size, allocated_size = 0;
-    target_phys_addr_t phys_addr;
-    ram_addr_t addr;
-    KVMDirtyLog d;
-    KVMSlot *mem;
-    int ret = 0;
-
-    d.dirty_bitmap = NULL;
-    while (start_addr < end_addr) {
-        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
-        if (mem == NULL) {
-            break;
-        }
-
-        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
-        if (!d.dirty_bitmap) {
-            d.dirty_bitmap = qemu_malloc(size);
-        } else if (size > allocated_size) {
-            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
-        }
-        allocated_size = size;
-        memset(d.dirty_bitmap, 0, allocated_size);
-
-        d.slot = mem->slot;
-
-        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
-            dprintf("ioctl failed %d\n", errno);
-            ret = -1;
-            break;
-        }
-
-        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
-             phys_addr < mem->start_addr + mem->memory_size;
-             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
-            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
-            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;
-
-            if (test_le_bit(nr, bitmap)) {
-                cpu_physical_memory_set_dirty(addr);
-            }
-        }
-        start_addr = phys_addr;
-    }
-    qemu_free(d.dirty_bitmap);
-
-    return ret;
-}
-#endif
-
 int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
 {
     int ret = -ENOSYS;
@@ -444,121 +119,6 @@ int kvm_check_many_ioeventfds(void)
 #endif
 }
 
-#ifdef KVM_UPSTREAM
-int kvm_init(int smp_cpus)
-{
-    static const char upgrade_note[] =
-        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
-        "(see http://sourceforge.net/projects/kvm).\n";
-    KVMState *s;
-    int ret;
-    int i;
-
-    if (smp_cpus > 1) {
-        fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");
-        return -EINVAL;
-    }
-
-    s = qemu_mallocz(sizeof(KVMState));
-
-#ifdef KVM_CAP_SET_GUEST_DEBUG
-    QTAILQ_INIT(&s->kvm_sw_breakpoints);
-#endif
-    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
-        s->slots[i].slot = i;
-
-    s->vmfd = -1;
-    s->fd = qemu_open("/dev/kvm", O_RDWR);
-    if (s->fd == -1) {
-        fprintf(stderr, "Could not access KVM kernel module: %m\n");
-        ret = -errno;
-        goto err;
-    }
-
-    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
-    if (ret < KVM_API_VERSION) {
-        if (ret > 0)
-            ret = -EINVAL;
-        fprintf(stderr, "kvm version too old\n");
-        goto err;
-    }
-
-    if (ret > KVM_API_VERSION) {
-        ret = -EINVAL;
-        fprintf(stderr, "kvm version not supported\n");
-        goto err;
-    }
-
-    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
-    if (s->vmfd < 0)
-        goto err;
-
-    /* initially, KVM allocated its own memory and we had to jump through
-     * hooks to make phys_ram_base point to this.  Modern versions of KVM
-     * just use a user allocated buffer so we can use regular pages
-     * unmodified.  Make sure we have a sufficiently modern version of KVM.
-     */
-    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
-        ret = -EINVAL;
-        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
-                upgrade_note);
-        goto err;
-    }
-
-    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
-     * destroyed properly.  Since we rely on this capability, refuse to work
-     * with any kernel without this capability. */
-    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
-        ret = -EINVAL;
-
-        fprintf(stderr,
-                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
-                upgrade_note);
-        goto err;
-    }
-
-    s->coalesced_mmio = 0;
-#ifdef KVM_CAP_COALESCED_MMIO
-    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
-    s->coalesced_mmio_ring = NULL;
-#endif
-
-    s->broken_set_mem_region = 1;
-#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
-    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
-    if (ret > 0) {
-        s->broken_set_mem_region = 0;
-    }
-#endif
-
-    s->vcpu_events = 0;
-#ifdef KVM_CAP_VCPU_EVENTS
-    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
-#endif
-
-    ret = kvm_arch_init(s, smp_cpus);
-    if (ret < 0)
-        goto err;
-
-    kvm_state = s;
-
-    s->many_ioeventfds = kvm_check_many_ioeventfds();
-
-    return 0;
-
-err:
-    if (s) {
-        if (s->vmfd != -1)
-            close(s->vmfd);
-        if (s->fd != -1)
-            close(s->fd);
-    }
-    qemu_free(s);
-
-    return ret;
-}
-#endif
-
 static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
 {
@@ -598,283 +158,6 @@ static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
     return 1;
 }
 
-#ifdef KVM_UPSTREAM
-void kvm_flush_coalesced_mmio_buffer(void)
-{
-#ifdef KVM_CAP_COALESCED_MMIO
-    KVMState *s = kvm_state;
-
-    if (s->coalesced_flush_in_progress) {
-        return;
-    }
-
-    s->coalesced_flush_in_progress = true;
-
-    if (s->coalesced_mmio_ring) {
-        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
-        while (ring->first != ring->last) {
-            struct kvm_coalesced_mmio *ent;
-
-            ent = &ring->coalesced_mmio[ring->first];
-
-            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
-            /* FIXME smp_wmb() */
-            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
-        }
-    }
-
-    s->coalesced_flush_in_progress = false;
-#endif
-}
-
-void kvm_cpu_synchronize_state(CPUState *env)
-{
-    if (!env->kvm_state->regs_modified) {
-        kvm_arch_get_registers(env);
-        env->kvm_state->regs_modified = 1;
-    }
-}
-
-int kvm_cpu_exec(CPUState *env)
-{
-    struct kvm_run *run = env->kvm_run;
-    int ret;
-
-    dprintf("kvm_cpu_exec()\n");
-
-    do {
-        if (env->exit_request) {
-            dprintf("interrupt exit requested\n");
-            ret = 0;
-            break;
-        }
-
-        if (env->kvm_state->regs_modified) {
-            kvm_arch_put_registers(env);
-            env->kvm_state->regs_modified = 0;
-        }
-
-        kvm_arch_pre_run(env, run);
-        qemu_mutex_unlock_iothread();
-        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
-        qemu_mutex_lock_iothread();
-        kvm_arch_post_run(env, run);
-
-        kvm_flush_coalesced_mmio_buffer();
-
-        if (ret == -EINTR || ret == -EAGAIN) {
-            dprintf("io window exit\n");
-            ret = 0;
-            break;
-        }
-
-        if (ret < 0) {
-            dprintf("kvm run failed %s\n", strerror(-ret));
-            abort();
-        }
-
-        ret = 0; /* exit loop */
-        switch (run->exit_reason) {
-        case KVM_EXIT_IO:
-            dprintf("handle_io\n");
-            ret = kvm_handle_io(run->io.port,
-                                (uint8_t *)run + run->io.data_offset,
-                                run->io.direction,
-                                run->io.size,
-                                run->io.count);
-            break;
-        case KVM_EXIT_MMIO:
-            dprintf("handle_mmio\n");
-            cpu_physical_memory_rw(run->mmio.phys_addr,
-                                   run->mmio.data,
-                                   run->mmio.len,
-                                   run->mmio.is_write);
-            ret = 1;
-            break;
-        case KVM_EXIT_IRQ_WINDOW_OPEN:
-            dprintf("irq_window_open\n");
-            break;
-        case KVM_EXIT_SHUTDOWN:
-            dprintf("shutdown\n");
-            qemu_system_reset_request();
-            ret = 1;
-            break;
-        case KVM_EXIT_UNKNOWN:
-            dprintf("kvm_exit_unknown\n");
-            break;
-        case KVM_EXIT_FAIL_ENTRY:
-            dprintf("kvm_exit_fail_entry\n");
-            break;
-        case KVM_EXIT_EXCEPTION:
-            dprintf("kvm_exit_exception\n");
-            break;
-        case KVM_EXIT_DEBUG:
-            dprintf("kvm_exit_debug\n");
-#ifdef KVM_CAP_SET_GUEST_DEBUG
-            if (kvm_arch_debug(&run->debug.arch)) {
-                gdb_set_stop_cpu(env);
-                vm_stop(RUN_STATE_DEBUG);
-                env->exception_index = EXCP_DEBUG;
-                return 0;
-            }
-            /* re-enter, this exception was guest-internal */
-            ret = 1;
-#endif /* KVM_CAP_SET_GUEST_DEBUG */
-            break;
-        default:
-            dprintf("kvm_arch_handle_exit\n");
-            ret = kvm_arch_handle_exit(env, run);
-            break;
-        }
-    } while (ret > 0);
-
-    if (env->exit_request) {
-        env->exit_request = 0;
-        env->exception_index = EXCP_INTERRUPT;
-    }
-
-    return ret;
-}
-
-void kvm_set_phys_mem(target_phys_addr_t start_addr,
-                      ram_addr_t size,
-                      ram_addr_t phys_offset)
-{
-    KVMState *s = kvm_state;
-    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
-    KVMSlot *mem, old;
-    int err;
-
-    if (start_addr & ~TARGET_PAGE_MASK) {
-        if (flags >= IO_MEM_UNASSIGNED) {
-            if (!kvm_lookup_overlapping_slot(s, start_addr,
-                                             start_addr + size)) {
-                return;
-            }
-            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
-        } else {
-            fprintf(stderr, "Only page-aligned memory slots supported\n");
-        }
-        abort();
-    }
-
-    /* KVM does not support read-only slots */
-    phys_offset &= ~IO_MEM_ROM;
-
-    while (1) {
-        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
-        if (!mem) {
-            break;
-        }
-
-        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
-            (start_addr + size <= mem->start_addr + mem->memory_size) &&
-            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
-            /* The new slot fits into the existing one and comes with
-             * identical parameters - nothing to be done. */
-            return;
-        }
-
-        old = *mem;
-
-        /* unregister the overlapping slot */
-        mem->memory_size = 0;
-        err = kvm_set_user_memory_region(s, mem);
-        if (err) {
-            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
-                    __func__, strerror(-err));
-            abort();
-        }
-
-        /* Workaround for older KVM versions: we can't join slots, even not by
-         * unregistering the previous ones and then registering the larger
-         * slot. We have to maintain the existing fragmentation. Sigh.
-         *
-         * This workaround assumes that the new slot starts at the same
-         * address as the first existing one. If not or if some overlapping
-         * slot comes around later, we will fail (not seen in practice so far)
-         * - and actually require a recent KVM version. */
-        if (s->broken_set_mem_region &&
-            old.start_addr == start_addr && old.memory_size < size &&
-            flags < IO_MEM_UNASSIGNED) {
-            mem = kvm_alloc_slot(s);
-            mem->memory_size = old.memory_size;
-            mem->start_addr = old.start_addr;
-            mem->phys_offset = old.phys_offset;
-            mem->flags = 0;
-
-            err = kvm_set_user_memory_region(s, mem);
-            if (err) {
-                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
-                        strerror(-err));
-                abort();
-            }
-
-            start_addr += old.memory_size;
-            phys_offset += old.memory_size;
-            size -= old.memory_size;
-            continue;
-        }
-
-        /* register prefix slot */
-        if (old.start_addr < start_addr) {
-            mem = kvm_alloc_slot(s);
-            mem->memory_size = start_addr - old.start_addr;
-            mem->start_addr = old.start_addr;
-            mem->phys_offset = old.phys_offset;
-            mem->flags = 0;
-
-            err = kvm_set_user_memory_region(s, mem);
-            if (err) {
-                fprintf(stderr, "%s: error registering prefix slot: %s\n",
-                        __func__, strerror(-err));
-                abort();
-            }
-        }
-
-        /* register suffix slot */
-        if (old.start_addr + old.memory_size > start_addr + size) {
-            ram_addr_t size_delta;
-
-            mem = kvm_alloc_slot(s);
-            mem->start_addr = start_addr + size;
-            size_delta = mem->start_addr - old.start_addr;
-            mem->memory_size = old.memory_size - size_delta;
-            mem->phys_offset = old.phys_offset + size_delta;
-            mem->flags = 0;
-
-            err = kvm_set_user_memory_region(s, mem);
-            if (err) {
-                fprintf(stderr, "%s: error registering suffix slot: %s\n",
-                        __func__, strerror(-err));
-                abort();
-            }
-        }
-    }
-
-    /* in case the KVM bug workaround already "consumed" the new slot */
-    if (!size)
-        return;
-
-    /* KVM does not need to know about this memory */
-    if (flags >= IO_MEM_UNASSIGNED)
-        return;
-
-    mem = kvm_alloc_slot(s);
-    mem->memory_size = size;
-    mem->start_addr = start_addr;
-    mem->phys_offset = phys_offset;
-    mem->flags = 0;
-
-    err = kvm_set_user_memory_region(s, mem);
-    if (err) {
-        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
-                strerror(-err));
-        abort();
-    }
-}
-
-#endif
 int kvm_ioctl(KVMState *s, int type, ...)
 {
     int ret;
@@ -950,44 +233,8 @@ int kvm_has_many_ioeventfds(void)
     return kvm_state->many_ioeventfds;
 }
 
-#ifdef KVM_UPSTREAM
-void kvm_setup_guest_memory(void *start, size_t size)
-{
-    if (!kvm_has_sync_mmu()) {
-#ifdef MADV_DONTFORK
-        int ret = madvise(start, size, MADV_DONTFORK);
-
-        if (ret) {
-            perror("madvice");
-            exit(1);
-        }
-#else
-        fprintf(stderr,
-                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
-        exit(1);
-#endif
-    }
-}
-
-#endif /* KVM_UPSTREAM */
-
 #ifdef KVM_CAP_SET_GUEST_DEBUG
 
-#ifdef KVM_UPSTREAM
-static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
-{
-#ifdef CONFIG_IOTHREAD
-    if (env == cpu_single_env) {
-        func(data);
-        return;
-    }
-    abort();
-#else
-    func(data);
-#endif
-}
-#endif /* KVM_UPSTREAM */
-
 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                  target_ulong pc)
 {
@@ -1005,43 +252,6 @@ int kvm_sw_breakpoints_active(CPUState *env)
     return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
 }
 
-#ifdef KVM_UPSTREAM
-
-struct kvm_set_guest_debug_data {
-    struct kvm_guest_debug dbg;
-    CPUState *env;
-    int err;
-};
-
-static void kvm_invoke_set_guest_debug(void *data)
-{
-    struct kvm_set_guest_debug_data *dbg_data = data;
-    CPUState *env = dbg_data->env;
-
-    if (env->kvm_state->regs_modified) {
-        kvm_arch_put_registers(env);
-        env->kvm_state->regs_modified = 0;
-    }
-    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
-}
-
-int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
-{
-    struct kvm_set_guest_debug_data data;
-
-    data.dbg.control = 0;
-    if (env->singlestep_enabled)
-        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
-
-    kvm_arch_update_guest_debug(env, &data.dbg);
-    data.dbg.control |= reinject_trap;
-    data.env = env;
-
-    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
-    return data.err;
-}
-#endif
-
 int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                           target_ulong len, int type)
 {
diff --git a/kvm.h b/kvm.h
index c10d4a0..6c22f98 100644
--- a/kvm.h
+++ b/kvm.h
@@ -19,144 +19,13 @@
 #include "qemu-queue.h"
 #include "qemu-kvm.h"
 
-#ifdef KVM_UPSTREAM
-
-#ifdef CONFIG_KVM
-#include <linux/kvm.h>
-#endif
-
-#ifdef CONFIG_KVM
-extern int kvm_allowed;
-
-#define kvm_enabled() (kvm_allowed)
-#else
-#define kvm_enabled() (0)
-#endif
-
-struct kvm_run;
-
-/* external API */
-
-int kvm_init(int smp_cpus);
-
-int kvm_init_vcpu(CPUState *env);
-
-int kvm_cpu_exec(CPUState *env);
-
-void kvm_set_phys_mem(target_phys_addr_t start_addr,
-                      ram_addr_t size,
-                      ram_addr_t phys_offset);
-
-int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                   target_phys_addr_t end_addr);
-
-int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size);
-int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
-int kvm_set_migration_log(int enable);
-
-int kvm_has_sync_mmu(void);
-#endif /* KVM_UPSTREAM */
 int kvm_has_vcpu_events(void);
 int kvm_put_vcpu_events(CPUState *env);
 int kvm_get_vcpu_events(CPUState *env);
 
 void kvm_flush_coalesced_mmio_buffer(void);
 
-#ifdef KVM_UPSTREAM
-
-void kvm_setup_guest_memory(void *start, size_t size);
-
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
-
-int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
-                          target_ulong len, int type);
-int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
-                          target_ulong len, int type);
-void kvm_remove_all_breakpoints(CPUState *current_env);
-int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
-
-int kvm_pit_in_kernel(void);
-int kvm_irqchip_in_kernel(void);
-
-/* internal API */
-
-struct KVMState;
-typedef struct KVMState KVMState;
-
-int kvm_ioctl(KVMState *s, int type, ...);
-
-int kvm_vm_ioctl(KVMState *s, int type, ...);
-
-int kvm_vcpu_ioctl(CPUState *env, int type, ...);
-
-/* Arch specific hooks */
-
-int kvm_arch_post_run(CPUState *env, struct kvm_run *run);
-
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run);
-
-int kvm_arch_pre_run(CPUState *env, struct kvm_run *run);
-
-int kvm_arch_get_registers(CPUState *env);
-
-int kvm_arch_put_registers(CPUState *env);
-
-int kvm_arch_init(KVMState *s, int smp_cpus);
-
-int kvm_arch_init_vcpu(CPUState *env);
-
-#endif
 void kvm_arch_reset_vcpu(CPUState *env);
-#ifdef KVM_UPSTREAM
-
-struct kvm_guest_debug;
-struct kvm_debug_exit_arch;
-
-struct kvm_sw_breakpoint {
-    target_ulong pc;
-    target_ulong saved_insn;
-    int use_count;
-    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
-};
-
-QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
-
-int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info);
-
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
-                                                 target_ulong pc);
-
-int kvm_sw_breakpoints_active(CPUState *env);
-
-int kvm_arch_insert_sw_breakpoint(CPUState *current_env,
-                                  struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUState *current_env,
-                                  struct kvm_sw_breakpoint *bp);
-int kvm_arch_insert_hw_breakpoint(target_ulong addr,
-                                  target_ulong len, int type);
-int kvm_arch_remove_hw_breakpoint(target_ulong addr,
-                                  target_ulong len, int type);
-void kvm_arch_remove_all_hw_breakpoints(void);
-
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg);
-
-int kvm_check_extension(KVMState *s, unsigned int extension);
-
-uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
-                                      uint32_t index, int reg);
-void kvm_cpu_synchronize_state(CPUState *env);
-
-/* generic hooks - to be moved/refactored once there are more users */
-
-static inline void cpu_synchronize_state(CPUState *env)
-{
-    if (kvm_enabled()) {
-        kvm_cpu_synchronize_state(env);
-    }
-}
-
-#endif
 
 int kvm_has_many_ioeventfds(void);
 
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index a3e87dd..3d22663 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -25,216 +25,6 @@
 #include "gdbstub.h"
 #include "host-utils.h"
 
-#ifdef KVM_UPSTREAM
-//#define DEBUG_KVM
-
-#ifdef DEBUG_KVM
-#define dprintf(fmt, ...) \
-    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define dprintf(fmt, ...) \
-    do { } while (0)
-#endif
-
-#define MSR_KVM_WALL_CLOCK  0x11
-#define MSR_KVM_SYSTEM_TIME 0x12
-
-#ifdef KVM_CAP_EXT_CPUID
-
-static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
-{
-    struct kvm_cpuid2 *cpuid;
-    int r, size;
-
-    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
-    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
-    cpuid->nent = max;
-    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
-    if (r == 0 && cpuid->nent >= max) {
-        r = -E2BIG;
-    }
-    if (r < 0) {
-        if (r == -E2BIG) {
-            qemu_free(cpuid);
-            return NULL;
-        } else {
-            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
-                    strerror(-r));
-            exit(1);
-        }
-    }
-    return cpuid;
-}
-
-uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
-                                      uint32_t index, int reg)
-{
-    struct kvm_cpuid2 *cpuid;
-    int i, max;
-    uint32_t ret = 0;
-    uint32_t cpuid_1_edx;
-
-    if (!kvm_check_extension(s, KVM_CAP_EXT_CPUID)) {
-        return -1U;
-    }
-
-    max = 1;
-    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
-        max *= 2;
-    }
-
-    for (i = 0; i < cpuid->nent; ++i) {
-        if (cpuid->entries[i].function == function &&
-	    cpuid->entries[i].index == index) {
-            switch (reg) {
-            case R_EAX:
-                ret = cpuid->entries[i].eax;
-                break;
-            case R_EBX:
-                ret = cpuid->entries[i].ebx;
-                break;
-            case R_ECX:
-                ret = cpuid->entries[i].ecx;
-                break;
-            case R_EDX:
-                ret = cpuid->entries[i].edx;
-                if (function == 0x80000001) {
-                    /* On Intel, kvm returns cpuid according to the Intel spec,
-                     * so add missing bits according to the AMD spec:
-                     */
-                    cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
-                    ret |= cpuid_1_edx & 0xdfeff7ff;
-                }
-                break;
-            }
-        }
-    }
-
-    qemu_free(cpuid);
-
-    return ret;
-}
-
-#else
-
-uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
-                                      uint32_t index, int reg)
-{
-    return -1U;
-}
-
-#endif
-
-static void kvm_trim_features(uint32_t *features, uint32_t supported)
-{
-    int i;
-    uint32_t mask;
-
-    for (i = 0; i < 32; ++i) {
-        mask = 1U << i;
-        if ((*features & mask) && !(supported & mask)) {
-            *features &= ~mask;
-        }
-    }
-}
-
-int kvm_arch_init_vcpu(CPUState *env)
-{
-    struct {
-        struct kvm_cpuid2 cpuid;
-        struct kvm_cpuid_entry2 entries[100];
-    } __attribute__((packed)) cpuid_data;
-    KVMState *s = env->kvm_state;
-    uint32_t limit, i, j, cpuid_i;
-    uint32_t unused;
-
-    env->mp_state = KVM_MP_STATE_RUNNABLE;
-
-    kvm_trim_features(&env->cpuid_features,
-        kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX));
-
-    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
-    kvm_trim_features(&env->cpuid_ext_features,
-        kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX));
-    env->cpuid_ext_features |= i;
-
-    kvm_trim_features(&env->cpuid_ext2_features,
-        kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX));
-    kvm_trim_features(&env->cpuid_ext3_features,
-        kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX));
-
-    cpuid_i = 0;
-
-    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
-
-    for (i = 0; i <= limit; i++) {
-        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];
-
-        switch (i) {
-        case 2: {
-            /* Keep reading function 2 till all the input is received */
-            int times;
-
-            c->function = i;
-            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
-                       KVM_CPUID_FLAG_STATE_READ_NEXT;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            times = c->eax & 0xff;
-
-            for (j = 1; j < times; ++j) {
-                c = &cpuid_data.entries[cpuid_i++];
-                c->function = i;
-                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
-                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            }
-            break;
-        }
-        case 4:
-        case 0xb:
-        case 0xd:
-            for (j = 0; ; j++) {
-                if (i == 0xd && j == 64) {
-                    break;
-                }
-                c->function = i;
-                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                c->index = j;
-                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
-                if (i == 4 && c->eax == 0)
-                    break;
-                if (i == 0xb && !(c->ecx & 0xff00))
-                    break;
-                if (i == 0xd && c->eax == 0)
-                    continue;
-
-                c = &cpuid_data.entries[cpuid_i++];
-            }
-            break;
-        default:
-            c->function = i;
-            c->flags = 0;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            break;
-        }
-    }
-    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
-
-    for (i = 0x80000000; i <= limit; i++) {
-        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];
-
-        c->function = i;
-        c->flags = 0;
-        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-    }
-
-    cpuid_data.cpuid.nent = cpuid_i;
-
-    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
-}
-
-#endif
-
 static void kvm_clear_vapic(CPUState *env)
 {
 #ifdef KVM_SET_VAPIC_ADDR
@@ -255,492 +45,6 @@ void kvm_arch_reset_vcpu(CPUState *env)
     /* Legal xcr0 for loading */
     env->xcr0 = 1;
 }
-#ifdef KVM_UPSTREAM
-
-static int kvm_has_msr_star(CPUState *env)
-{
-    static int has_msr_star;
-    int ret;
-
-    /* first time */
-    if (has_msr_star == 0) {        
-        struct kvm_msr_list msr_list, *kvm_msr_list;
-
-        has_msr_star = -1;
-
-        /* Obtain MSR list from KVM.  These are the MSRs that we must
-         * save/restore */
-        msr_list.nmsrs = 0;
-        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
-        if (ret < 0 && ret != -E2BIG) {
-            return 0;
-        }
-        /* Old kernel modules had a bug and could write beyond the provided
-           memory. Allocate at least a safe amount of 1K. */
-        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
-                                              msr_list.nmsrs *
-                                              sizeof(msr_list.indices[0])));
-
-        kvm_msr_list->nmsrs = msr_list.nmsrs;
-        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
-        if (ret >= 0) {
-            int i;
-
-            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
-                if (kvm_msr_list->indices[i] == MSR_STAR) {
-                    has_msr_star = 1;
-                    break;
-                }
-            }
-        }
-
-        free(kvm_msr_list);
-    }
-
-    if (has_msr_star == 1)
-        return 1;
-    return 0;
-}
-
-int kvm_arch_init(KVMState *s, int smp_cpus)
-{
-    int ret;
-
-    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
-     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
-     * must be part of guest physical memory, we need to allocate it.  Older
-     * versions of KVM just assumed that it would be at the end of physical
-     * memory but that doesn't work with more than 4GB of memory.  We simply
-     * refuse to work with those older versions of KVM. */
-    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
-    if (ret <= 0) {
-        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
-        return ret;
-    }
-
-    /* this address is 3 pages before the bios, and the bios should present
-     * as unavaible memory.  FIXME, need to ensure the e820 map deals with
-     * this?
-     */
-    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
-}
-                    
-static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
-{
-    lhs->selector = rhs->selector;
-    lhs->base = rhs->base;
-    lhs->limit = rhs->limit;
-    lhs->type = 3;
-    lhs->present = 1;
-    lhs->dpl = 3;
-    lhs->db = 0;
-    lhs->s = 1;
-    lhs->l = 0;
-    lhs->g = 0;
-    lhs->avl = 0;
-    lhs->unusable = 0;
-}
-
-static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
-{
-    unsigned flags = rhs->flags;
-    lhs->selector = rhs->selector;
-    lhs->base = rhs->base;
-    lhs->limit = rhs->limit;
-    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
-    lhs->present = (flags & DESC_P_MASK) != 0;
-    lhs->dpl = rhs->selector & 3;
-    lhs->db = (flags >> DESC_B_SHIFT) & 1;
-    lhs->s = (flags & DESC_S_MASK) != 0;
-    lhs->l = (flags >> DESC_L_SHIFT) & 1;
-    lhs->g = (flags & DESC_G_MASK) != 0;
-    lhs->avl = (flags & DESC_AVL_MASK) != 0;
-    lhs->unusable = 0;
-}
-
-static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
-{
-    lhs->selector = rhs->selector;
-    lhs->base = rhs->base;
-    lhs->limit = rhs->limit;
-    lhs->flags =
-	(rhs->type << DESC_TYPE_SHIFT)
-	| (rhs->present * DESC_P_MASK)
-	| (rhs->dpl << DESC_DPL_SHIFT)
-	| (rhs->db << DESC_B_SHIFT)
-	| (rhs->s * DESC_S_MASK)
-	| (rhs->l << DESC_L_SHIFT)
-	| (rhs->g * DESC_G_MASK)
-	| (rhs->avl * DESC_AVL_MASK);
-}
-
-static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
-{
-    if (set)
-        *kvm_reg = *qemu_reg;
-    else
-        *qemu_reg = *kvm_reg;
-}
-
-static int kvm_getput_regs(CPUState *env, int set)
-{
-    struct kvm_regs regs;
-    int ret = 0;
-
-    if (!set) {
-        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
-        if (ret < 0)
-            return ret;
-    }
-
-    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
-    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
-    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
-    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
-    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
-    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
-    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
-    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
-#ifdef TARGET_X86_64
-    kvm_getput_reg(&regs.r8, &env->regs[8], set);
-    kvm_getput_reg(&regs.r9, &env->regs[9], set);
-    kvm_getput_reg(&regs.r10, &env->regs[10], set);
-    kvm_getput_reg(&regs.r11, &env->regs[11], set);
-    kvm_getput_reg(&regs.r12, &env->regs[12], set);
-    kvm_getput_reg(&regs.r13, &env->regs[13], set);
-    kvm_getput_reg(&regs.r14, &env->regs[14], set);
-    kvm_getput_reg(&regs.r15, &env->regs[15], set);
-#endif
-
-    kvm_getput_reg(&regs.rflags, &env->eflags, set);
-    kvm_getput_reg(&regs.rip, &env->eip, set);
-
-    if (set)
-        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
-
-    return ret;
-}
-
-static int kvm_put_fpu(CPUState *env)
-{
-    struct kvm_fpu fpu;
-    int i;
-
-    memset(&fpu, 0, sizeof fpu);
-    fpu.fsw = env->fpus & ~(7 << 11);
-    fpu.fsw |= (env->fpstt & 7) << 11;
-    fpu.fcw = env->fpuc;
-    for (i = 0; i < 8; ++i)
-	fpu.ftwx |= (!env->fptags[i]) << i;
-    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
-    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
-    fpu.mxcsr = env->mxcsr;
-
-    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
-}
-
-static int kvm_put_sregs(CPUState *env)
-{
-    struct kvm_sregs sregs;
-
-    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
-    if (env->interrupt_injected >= 0) {
-        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
-                (uint64_t)1 << (env->interrupt_injected % 64);
-    }
-
-    if ((env->eflags & VM_MASK)) {
-	    set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
-	    set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
-	    set_v8086_seg(&sregs.es, &env->segs[R_ES]);
-	    set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
-	    set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
-	    set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
-    } else {
-	    set_seg(&sregs.cs, &env->segs[R_CS]);
-	    set_seg(&sregs.ds, &env->segs[R_DS]);
-	    set_seg(&sregs.es, &env->segs[R_ES]);
-	    set_seg(&sregs.fs, &env->segs[R_FS]);
-	    set_seg(&sregs.gs, &env->segs[R_GS]);
-	    set_seg(&sregs.ss, &env->segs[R_SS]);
-
-	    if (env->cr[0] & CR0_PE_MASK) {
-		/* force ss cpl to cs cpl */
-		sregs.ss.selector = (sregs.ss.selector & ~3) |
-			(sregs.cs.selector & 3);
-		sregs.ss.dpl = sregs.ss.selector & 3;
-	    }
-    }
-
-    set_seg(&sregs.tr, &env->tr);
-    set_seg(&sregs.ldt, &env->ldt);
-
-    sregs.idt.limit = env->idt.limit;
-    sregs.idt.base = env->idt.base;
-    sregs.gdt.limit = env->gdt.limit;
-    sregs.gdt.base = env->gdt.base;
-
-    sregs.cr0 = env->cr[0];
-    sregs.cr2 = env->cr[2];
-    sregs.cr3 = env->cr[3];
-    sregs.cr4 = env->cr[4];
-
-    sregs.cr8 = cpu_get_apic_tpr(env);
-    sregs.apic_base = cpu_get_apic_base(env);
-
-    sregs.efer = env->efer;
-
-    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
-}
-
-static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
-                              uint32_t index, uint64_t value)
-{
-    entry->index = index;
-    entry->data = value;
-}
-
-static int kvm_put_msrs(CPUState *env)
-{
-    struct {
-        struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
-    } msr_data;
-    struct kvm_msr_entry *msrs = msr_data.entries;
-    int n = 0;
-
-    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
-    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
-    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
-    if (kvm_has_msr_star(env))
-	kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
-    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
-    kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
-#ifdef TARGET_X86_64
-    /* FIXME if lm capable */
-    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
-    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
-    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
-    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
-#endif
-    kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,  env->system_time_msr);
-    kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK,  env->wall_clock_msr);
-
-    msr_data.info.nmsrs = n;
-
-    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
-
-}
-
-
-static int kvm_get_fpu(CPUState *env)
-{
-    struct kvm_fpu fpu;
-    int i, ret;
-
-    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
-    if (ret < 0)
-        return ret;
-
-    env->fpstt = (fpu.fsw >> 11) & 7;
-    env->fpus = fpu.fsw;
-    env->fpuc = fpu.fcw;
-    for (i = 0; i < 8; ++i)
-	env->fptags[i] = !((fpu.ftwx >> i) & 1);
-    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
-    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
-    env->mxcsr = fpu.mxcsr;
-
-    return 0;
-}
-
-static int kvm_get_sregs(CPUState *env)
-{
-    struct kvm_sregs sregs;
-    uint32_t hflags;
-    int bit, i, ret;
-
-    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
-    if (ret < 0)
-        return ret;
-
-    /* There can only be one pending IRQ set in the bitmap at a time, so try
-       to find it and save its number instead (-1 for none). */
-    env->interrupt_injected = -1;
-    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
-        if (sregs.interrupt_bitmap[i]) {
-            bit = ctz64(sregs.interrupt_bitmap[i]);
-            env->interrupt_injected = i * 64 + bit;
-            break;
-        }
-    }
-
-    get_seg(&env->segs[R_CS], &sregs.cs);
-    get_seg(&env->segs[R_DS], &sregs.ds);
-    get_seg(&env->segs[R_ES], &sregs.es);
-    get_seg(&env->segs[R_FS], &sregs.fs);
-    get_seg(&env->segs[R_GS], &sregs.gs);
-    get_seg(&env->segs[R_SS], &sregs.ss);
-
-    get_seg(&env->tr, &sregs.tr);
-    get_seg(&env->ldt, &sregs.ldt);
-
-    env->idt.limit = sregs.idt.limit;
-    env->idt.base = sregs.idt.base;
-    env->gdt.limit = sregs.gdt.limit;
-    env->gdt.base = sregs.gdt.base;
-
-    env->cr[0] = sregs.cr0;
-    env->cr[2] = sregs.cr2;
-    env->cr[3] = sregs.cr3;
-    env->cr[4] = sregs.cr4;
-
-    cpu_set_apic_base(env, sregs.apic_base);
-
-    env->efer = sregs.efer;
-    //cpu_set_apic_tpr(env, sregs.cr8);
-
-#define HFLAG_COPY_MASK ~( \
-			HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
-			HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
-			HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
-			HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
-
-
-
-    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
-    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
-    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
-	    (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
-    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
-    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
-	    (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
-
-    if (env->efer & MSR_EFER_LMA) {
-        hflags |= HF_LMA_MASK;
-    }
-
-    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
-        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
-    } else {
-        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
-		(DESC_B_SHIFT - HF_CS32_SHIFT);
-        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
-		(DESC_B_SHIFT - HF_SS32_SHIFT);
-        if (!(env->cr[0] & CR0_PE_MASK) ||
-                   (env->eflags & VM_MASK) ||
-                   !(hflags & HF_CS32_MASK)) {
-                hflags |= HF_ADDSEG_MASK;
-            } else {
-                hflags |= ((env->segs[R_DS].base |
-                                env->segs[R_ES].base |
-                                env->segs[R_SS].base) != 0) <<
-                    HF_ADDSEG_SHIFT;
-            }
-    }
-    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
-
-    return 0;
-}
-
-static int kvm_get_msrs(CPUState *env)
-{
-    struct {
-        struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
-    } msr_data;
-    struct kvm_msr_entry *msrs = msr_data.entries;
-    int ret, i, n;
-
-    n = 0;
-    msrs[n++].index = MSR_IA32_SYSENTER_CS;
-    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
-    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
-    if (kvm_has_msr_star(env))
-	msrs[n++].index = MSR_STAR;
-    msrs[n++].index = MSR_IA32_TSC;
-    msrs[n++].index = MSR_VM_HSAVE_PA;
-#ifdef TARGET_X86_64
-    /* FIXME lm_capable_kernel */
-    msrs[n++].index = MSR_CSTAR;
-    msrs[n++].index = MSR_KERNELGSBASE;
-    msrs[n++].index = MSR_FMASK;
-    msrs[n++].index = MSR_LSTAR;
-#endif
-    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
-    msrs[n++].index = MSR_KVM_WALL_CLOCK;
-
-    msr_data.info.nmsrs = n;
-    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
-    if (ret < 0)
-        return ret;
-
-    for (i = 0; i < ret; i++) {
-        switch (msrs[i].index) {
-        case MSR_IA32_SYSENTER_CS:
-            env->sysenter_cs = msrs[i].data;
-            break;
-        case MSR_IA32_SYSENTER_ESP:
-            env->sysenter_esp = msrs[i].data;
-            break;
-        case MSR_IA32_SYSENTER_EIP:
-            env->sysenter_eip = msrs[i].data;
-            break;
-        case MSR_STAR:
-            env->star = msrs[i].data;
-            break;
-#ifdef TARGET_X86_64
-        case MSR_CSTAR:
-            env->cstar = msrs[i].data;
-            break;
-        case MSR_KERNELGSBASE:
-            env->kernelgsbase = msrs[i].data;
-            break;
-        case MSR_FMASK:
-            env->fmask = msrs[i].data;
-            break;
-        case MSR_LSTAR:
-            env->lstar = msrs[i].data;
-            break;
-#endif
-        case MSR_IA32_TSC:
-            env->tsc = msrs[i].data;
-            break;
-        case MSR_KVM_SYSTEM_TIME:
-            env->system_time_msr = msrs[i].data;
-            break;
-        case MSR_KVM_WALL_CLOCK:
-            env->wall_clock_msr = msrs[i].data;
-            break;
-        case MSR_VM_HSAVE_PA:
-            env->vm_hsave = msrs[i].data;
-            break;
-        }
-    }
-
-    return 0;
-}
-
-static int kvm_put_mp_state(CPUState *env)
-{
-    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
-
-    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
-}
-
-static int kvm_get_mp_state(CPUState *env)
-{
-    struct kvm_mp_state mp_state;
-    int ret;
-
-    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
-    if (ret < 0) {
-        return ret;
-    }
-    env->mp_state = mp_state.mp_state;
-    return 0;
-}
-#endif
 
 int kvm_put_vcpu_events(CPUState *env)
 {
@@ -812,104 +116,6 @@ int kvm_get_vcpu_events(CPUState *env)
     return 0;
 }
 
-#ifdef KVM_UPSTREAM
-int kvm_arch_put_registers(CPUState *env)
-{
-    int ret;
-
-    ret = kvm_getput_regs(env, 1);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_fpu(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_sregs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_msrs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_mp_state(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_vcpu_events(env);
-    if (ret < 0)
-        return ret;
-
-    return 0;
-}
-
-int kvm_arch_get_registers(CPUState *env)
-{
-    int ret;
-
-    ret = kvm_getput_regs(env, 0);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_fpu(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_sregs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_msrs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_mp_state(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_vcpu_events(env);
-    if (ret < 0)
-        return ret;
-
-    return 0;
-}
-
-int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
-{
-    /* Try to inject an interrupt if the guest can accept it */
-    if (run->ready_for_interrupt_injection &&
-        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
-        (env->eflags & IF_MASK)) {
-        int irq;
-
-        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-        irq = cpu_get_pic_interrupt(env);
-        if (irq >= 0) {
-            struct kvm_interrupt intr;
-            intr.irq = irq;
-            /* FIXME: errors */
-            dprintf("injected interrupt %d\n", irq);
-            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
-        }
-    }
-
-    /* If we have an interrupt but the guest is not ready to receive an
-     * interrupt, request an interrupt window exit.  This will
-     * cause a return to userspace as soon as the guest is ready to
-     * receive interrupts. */
-    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
-        run->request_interrupt_window = 1;
-    else
-        run->request_interrupt_window = 0;
-
-    dprintf("setting tpr\n");
-    run->cr8 = cpu_get_apic_tpr(env);
-
-    return 0;
-}
-#endif
-
 int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
 {
     if (run->if_flag)
@@ -923,204 +129,4 @@ int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
     return 0;
 }
 
-#ifdef KVM_UPSTREAM
-static int kvm_handle_halt(CPUState *env)
-{
-    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
-          (env->eflags & IF_MASK)) &&
-        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
-        env->halted = 1;
-        env->exception_index = EXCP_HLT;
-        return 0;
-    }
-
-    return 1;
-}
-
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
-{
-    int ret = 0;
-
-    switch (run->exit_reason) {
-    case KVM_EXIT_HLT:
-        dprintf("handle_hlt\n");
-        ret = kvm_handle_halt(env);
-        break;
-    }
-
-    return ret;
-}
-
-#ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
-{
-    static const uint8_t int3 = 0xcc;
-
-    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
-        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
-        return -EINVAL;
-    return 0;
-}
-
-int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
-{
-    uint8_t int3;
-
-    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
-        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
-        return -EINVAL;
-    return 0;
-}
-
-static struct {
-    target_ulong addr;
-    int len;
-    int type;
-} hw_breakpoint[4];
-
-static int nb_hw_breakpoint;
-
-static int find_hw_breakpoint(target_ulong addr, int len, int type)
-{
-    int n;
-
-    for (n = 0; n < nb_hw_breakpoint; n++)
-        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
-            (hw_breakpoint[n].len == len || len == -1))
-            return n;
-    return -1;
-}
-
-int kvm_arch_insert_hw_breakpoint(target_ulong addr,
-                                  target_ulong len, int type)
-{
-    switch (type) {
-    case GDB_BREAKPOINT_HW:
-        len = 1;
-        break;
-    case GDB_WATCHPOINT_WRITE:
-    case GDB_WATCHPOINT_ACCESS:
-        switch (len) {
-        case 1:
-            break;
-        case 2:
-        case 4:
-        case 8:
-            if (addr & (len - 1))
-                return -EINVAL;
-            break;
-        default:
-            return -EINVAL;
-        }
-        break;
-    default:
-        return -ENOSYS;
-    }
-
-    if (nb_hw_breakpoint == 4)
-        return -ENOBUFS;
-
-    if (find_hw_breakpoint(addr, len, type) >= 0)
-        return -EEXIST;
-
-    hw_breakpoint[nb_hw_breakpoint].addr = addr;
-    hw_breakpoint[nb_hw_breakpoint].len = len;
-    hw_breakpoint[nb_hw_breakpoint].type = type;
-    nb_hw_breakpoint++;
-
-    return 0;
-}
-
-int kvm_arch_remove_hw_breakpoint(target_ulong addr,
-                                  target_ulong len, int type)
-{
-    int n;
-
-    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
-    if (n < 0)
-        return -ENOENT;
-
-    nb_hw_breakpoint--;
-    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
-
-    return 0;
-}
-
-void kvm_arch_remove_all_hw_breakpoints(void)
-{
-    nb_hw_breakpoint = 0;
-}
-
-static CPUWatchpoint hw_watchpoint;
-
-int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
-{
-    int handle = 0;
-    int n;
-
-    if (arch_info->exception == 1) {
-        if (arch_info->dr6 & (1 << 14)) {
-            if (cpu_single_env->singlestep_enabled)
-                handle = 1;
-        } else {
-            for (n = 0; n < 4; n++)
-                if (arch_info->dr6 & (1 << n))
-                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
-                    case 0x0:
-                        handle = 1;
-                        break;
-                    case 0x1:
-                        handle = 1;
-                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
-                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
-                        hw_watchpoint.flags = BP_MEM_WRITE;
-                        break;
-                    case 0x3:
-                        handle = 1;
-                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
-                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
-                        hw_watchpoint.flags = BP_MEM_ACCESS;
-                        break;
-                    }
-        }
-    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
-        handle = 1;
-
-    if (!handle)
-        kvm_update_guest_debug(cpu_single_env,
-                        (arch_info->exception == 1) ?
-                        KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);
-
-    return handle;
-}
-
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
-{
-    const uint8_t type_code[] = {
-        [GDB_BREAKPOINT_HW] = 0x0,
-        [GDB_WATCHPOINT_WRITE] = 0x1,
-        [GDB_WATCHPOINT_ACCESS] = 0x3
-    };
-    const uint8_t len_code[] = {
-        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
-    };
-    int n;
-
-    if (kvm_sw_breakpoints_active(env))
-        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
-
-    if (nb_hw_breakpoint > 0) {
-        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
-        dbg->arch.debugreg[7] = 0x0600;
-        for (n = 0; n < nb_hw_breakpoint; n++) {
-            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
-            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
-                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
-                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
-        }
-    }
-}
-#endif /* KVM_CAP_SET_GUEST_DEBUG */
-#endif
-
 #include "qemu-kvm-x86.c"
diff --git a/vl.c b/vl.c
index 9c6a9eb..38ac114 100644
--- a/vl.c
+++ b/vl.c
@@ -3605,11 +3605,6 @@ void qemu_notify_event(void)
     }
 }
 
-#if defined(KVM_UPSTREAM) || !defined(CONFIG_KVM)
-void qemu_mutex_lock_iothread(void) {}
-void qemu_mutex_unlock_iothread(void) {}
-#endif
-
 void vm_stop(RunState state)
 {
     do_vm_stop(state);
-- 
1.7.11.4

