-rw-r--r--  .gitignore | 1
-rw-r--r--  Makefile | 24
-rw-r--r--  Makefile.hw | 4
-rw-r--r--  Makefile.target | 36
-rw-r--r--  block/raw-posix.c | 2
-rw-r--r--  cache-utils.h | 21
-rw-r--r--  compat/sys/eventfd.h | 13
-rw-r--r--  compatfd.c | 143
-rw-r--r--  compatfd.h | 45
-rwxr-xr-x  configure | 183
-rw-r--r--  cpu-all.h | 1
-rw-r--r--  cpu-common.h | 1
-rw-r--r--  cpu-defs.h | 19
-rw-r--r--  cpu-exec.c | 8
-rw-r--r--  cutils.c | 5
-rw-r--r--  dma-helpers.c | 4
-rw-r--r--  exec.c | 172
-rw-r--r--  fpu/softfloat-native.c | 1
-rw-r--r--  gdbstub.c | 1
-rw-r--r--  hw/acpi.c | 71
-rw-r--r--  hw/apic.c | 144
-rw-r--r--  hw/cirrus_vga.c | 18
-rw-r--r--  hw/device-assignment.c | 1378
-rw-r--r--  hw/device-assignment.h | 117
-rw-r--r--  hw/extboot.c | 135
-rw-r--r--  hw/hpet.c | 13
-rw-r--r--  hw/hw.h | 13
-rw-r--r--  hw/i8254-kvm.c | 122
-rw-r--r--  hw/i8254.c | 137
-rw-r--r--  hw/i8254.h | 69
-rw-r--r--  hw/i8259.c | 144
-rw-r--r--  hw/ioapic.c | 93
-rw-r--r--  hw/ipf.c | 713
-rw-r--r--  hw/msix.c | 159
-rw-r--r--  hw/pc.c | 85
-rw-r--r--  hw/pc.h | 25
-rw-r--r--  hw/pci-hotplug.c | 23
-rw-r--r--  hw/pci.c | 154
-rw-r--r--  hw/pci.h | 61
-rw-r--r--  hw/pcspk.c | 48
-rw-r--r--  hw/piix_pci.c | 17
-rw-r--r--  hw/ppc440.c | 1
-rw-r--r--  hw/ppc440_bamboo.c | 1
-rw-r--r--  hw/ppce500_mpc8544ds.c | 1
-rw-r--r--  hw/testdev.c | 63
-rw-r--r--  hw/vga-pci.c | 2
-rw-r--r--  hw/vga.c | 47
-rw-r--r--  hw/vga_int.h | 6
-rw-r--r--  hw/virtio-balloon.c | 1
-rw-r--r--  hw/virtio-console.c | 3
-rw-r--r--  hw/vmport.c | 13
-rw-r--r--  ia64.ld | 2
-rw-r--r--  ia64intrin.h | 150
-rw-r--r--  kvm-all.c | 20
-rw-r--r--  kvm-tpr-opt.c | 410
-rw-r--r--  kvm.h | 12
-rw-r--r--  kvm/.gitignore | 66
-rw-r--r--  kvm/Makefile | 125
-rwxr-xr-x  kvm/configure | 159
-rw-r--r--  kvm/doxygen.conf | 1252
-rw-r--r--  kvm/extboot/Makefile | 41
-rw-r--r--  kvm/extboot/STATUS | 6
-rw-r--r--  kvm/extboot/signrom.c | 79
-rw-r--r--  kvm/include/ia64/asm/kvm.h | 264
-rw-r--r--  kvm/include/ia64/asm/kvm_para.h | 31
-rw-r--r--  kvm/include/linux/compiler.h | 2
-rw-r--r--  kvm/include/linux/config.h | 43
-rw-r--r--  kvm/include/linux/kvm.h | 740
-rw-r--r--  kvm/include/linux/kvm_para.h | 41
-rw-r--r--  kvm/include/powerpc/asm/kvm.h | 62
-rw-r--r--  kvm/include/powerpc/asm/kvm_para.h | 37
-rw-r--r--  kvm/include/x86/asm/kvm.h | 287
-rw-r--r--  kvm/include/x86/asm/kvm_para.h | 149
-rw-r--r--  kvm/kvm.spec | 139
-rwxr-xr-x  kvm/kvm_stat | 129
-rw-r--r--  kvm/libfdt/Makefile | 19
-rw-r--r--  kvm/libfdt/README | 3
-rw-r--r--  kvm/libfdt/fdt.c | 194
-rw-r--r--  kvm/libfdt/fdt.h | 60
-rw-r--r--  kvm/libfdt/fdt_ro.c | 476
-rw-r--r--  kvm/libfdt/fdt_rw.c | 467
-rw-r--r--  kvm/libfdt/fdt_strerror.c | 96
-rw-r--r--  kvm/libfdt/fdt_sw.c | 258
-rw-r--r--  kvm/libfdt/fdt_wip.c | 144
-rw-r--r--  kvm/libfdt/libfdt.h | 1076
-rw-r--r--  kvm/libfdt/libfdt_env.h | 22
-rw-r--r--  kvm/libfdt/libfdt_internal.h | 96
-rw-r--r--  kvm/libkvm/Makefile | 54
-rw-r--r--  kvm/libkvm/config-i386.mak | 6
-rw-r--r--  kvm/libkvm/config-ia64.mak | 5
-rw-r--r--  kvm/libkvm/config-ppc.mak | 4
-rw-r--r--  kvm/libkvm/config-s390.mak | 3
-rw-r--r--  kvm/libkvm/config-s390x.mak | 3
-rw-r--r--  kvm/libkvm/config-x86_64.mak | 6
-rw-r--r--  kvm/libkvm/kvm-common.h | 94
-rw-r--r--  kvm/libkvm/kvm-ia64.h | 31
-rw-r--r--  kvm/libkvm/kvm-powerpc.h | 36
-rw-r--r--  kvm/libkvm/kvm-s390.h | 31
-rw-r--r--  kvm/libkvm/kvm-x86.h | 55
-rw-r--r--  kvm/libkvm/libkvm-ia64.c | 82
-rw-r--r--  kvm/libkvm/libkvm-powerpc.c | 100
-rw-r--r--  kvm/libkvm/libkvm-s390.c | 110
-rw-r--r--  kvm/libkvm/libkvm-x86.c | 676
-rw-r--r--  kvm/libkvm/libkvm.c | 1497
-rw-r--r--  kvm/libkvm/libkvm.h | 868
-rw-r--r--  kvm/scripts/65-kvm.rules | 1
-rwxr-xr-x  kvm/scripts/kvm | 226
-rwxr-xr-x  kvm/scripts/make-combined-release | 36
-rwxr-xr-x  kvm/scripts/make-release | 67
-rwxr-xr-x  kvm/scripts/mkbootdisk | 30
-rwxr-xr-x  kvm/scripts/qemu-ifup | 5
-rwxr-xr-x  kvm/scripts/run_img | 4
-rwxr-xr-x  kvm/scripts/vmxcap | 215
-rw-r--r--  kvm/user/COPYRIGHT | 4
-rw-r--r--  kvm/user/Makefile | 60
-rwxr-xr-x  kvm/user/balloon_ctl.c | 92
-rw-r--r--  kvm/user/bootstrap.lds | 15
-rw-r--r--  kvm/user/config-i386.mak | 10
-rw-r--r--  kvm/user/config-ia64.mak | 7
-rw-r--r--  kvm/user/config-powerpc-440.mak | 15
-rw-r--r--  kvm/user/config-powerpc.mak | 39
-rw-r--r--  kvm/user/config-x86-common.mak | 77
-rw-r--r--  kvm/user/config-x86_64.mak | 12
-rwxr-xr-x  kvm/user/configure | 75
-rw-r--r--  kvm/user/flat.lds | 15
-rw-r--r--  kvm/user/formats | 31
-rw-r--r--  kvm/user/iotable.c | 53
-rw-r--r--  kvm/user/iotable.h | 40
-rw-r--r--  kvm/user/kvmtrace.c | 706
-rwxr-xr-x  kvm/user/kvmtrace_format | 532
-rw-r--r--  kvm/user/main-ppc.c | 383
-rw-r--r--  kvm/user/main.c | 611
-rw-r--r--  kvm/user/test/lib/libcflat.h | 42
-rw-r--r--  kvm/user/test/lib/panic.c | 13
-rw-r--r--  kvm/user/test/lib/powerpc/44x/map.c | 51
-rw-r--r--  kvm/user/test/lib/powerpc/44x/timebase.S | 28
-rw-r--r--  kvm/user/test/lib/powerpc/44x/timebase.h | 25
-rw-r--r--  kvm/user/test/lib/powerpc/44x/tlbwe.S | 29
-rw-r--r--  kvm/user/test/lib/powerpc/io.c | 35
-rw-r--r--  kvm/user/test/lib/printf.c | 179
-rw-r--r--  kvm/user/test/lib/string.c | 21
-rw-r--r--  kvm/user/test/lib/x86/apic-defs.h | 133
-rw-r--r--  kvm/user/test/lib/x86/apic.c | 143
-rw-r--r--  kvm/user/test/lib/x86/apic.h | 34
-rw-r--r--  kvm/user/test/lib/x86/fake-apic.h | 14
-rw-r--r--  kvm/user/test/lib/x86/fwcfg.c | 40
-rw-r--r--  kvm/user/test/lib/x86/fwcfg.h | 44
-rw-r--r--  kvm/user/test/lib/x86/io.c | 23
-rw-r--r--  kvm/user/test/lib/x86/smp.c | 128
-rw-r--r--  kvm/user/test/lib/x86/smp.h | 17
-rw-r--r--  kvm/user/test/powerpc/44x/tlbsx.S | 33
-rw-r--r--  kvm/user/test/powerpc/44x/tlbwe.S | 27
-rw-r--r--  kvm/user/test/powerpc/44x/tlbwe_16KB.S | 35
-rw-r--r--  kvm/user/test/powerpc/44x/tlbwe_hole.S | 27
-rw-r--r--  kvm/user/test/powerpc/cstart.S | 38
-rw-r--r--  kvm/user/test/powerpc/exit.c | 23
-rw-r--r--  kvm/user/test/powerpc/helloworld.c | 27
-rw-r--r--  kvm/user/test/powerpc/io.S | 32
-rw-r--r--  kvm/user/test/powerpc/spin.S | 4
-rw-r--r--  kvm/user/test/powerpc/sprg.S | 7
-rw-r--r--  kvm/user/test/x86/access.c | 604
-rw-r--r--  kvm/user/test/x86/apic.c | 317
-rw-r--r--  kvm/user/test/x86/bootstrap.S | 137
-rw-r--r--  kvm/user/test/x86/cstart.S | 19
-rw-r--r--  kvm/user/test/x86/cstart64.S | 225
-rw-r--r--  kvm/user/test/x86/emulator.c | 258
-rw-r--r--  kvm/user/test/x86/exit.c | 7
-rw-r--r--  kvm/user/test/x86/hypercall.c | 31
-rw-r--r--  kvm/user/test/x86/ioram.h | 7
-rw-r--r--  kvm/user/test/x86/memtest1.S | 44
-rw-r--r--  kvm/user/test/x86/msr.c | 52
-rw-r--r--  kvm/user/test/x86/port80.c | 12
-rw-r--r--  kvm/user/test/x86/print.S | 31
-rw-r--r--  kvm/user/test/x86/print.h | 19
-rw-r--r--  kvm/user/test/x86/realmode.c | 624
-rw-r--r--  kvm/user/test/x86/realmode.lds | 12
-rw-r--r--  kvm/user/test/x86/runtime.h | 6
-rw-r--r--  kvm/user/test/x86/sieve.c | 89
-rw-r--r--  kvm/user/test/x86/simple.S | 13
-rw-r--r--  kvm/user/test/x86/smptest.c | 25
-rw-r--r--  kvm/user/test/x86/stringio.S | 31
-rw-r--r--  kvm/user/test/x86/test32.S | 8
-rw-r--r--  kvm/user/test/x86/tsc.c | 38
-rw-r--r--  kvm/user/test/x86/vm.c | 271
-rw-r--r--  kvm/user/test/x86/vm.h | 10
-rw-r--r--  kvm/user/test/x86/vmexit.c | 164
-rw-r--r--  kvm/vgabios/.cvsignore | 1
-rw-r--r--  kvm/vgabios/BUGS | 3
-rw-r--r--  kvm/vgabios/COPYING | 504
-rw-r--r--  kvm/vgabios/ChangeLog | 1264
-rw-r--r--  kvm/vgabios/Makefile | 87
-rw-r--r--  kvm/vgabios/Notes | 11
-rw-r--r--  kvm/vgabios/README | 219
-rw-r--r--  kvm/vgabios/TODO | 26
-rw-r--r--  kvm/vgabios/biossums.c | 282
-rw-r--r--  kvm/vgabios/clext.c | 1688
-rwxr-xr-x  kvm/vgabios/dataseghack | 23
-rw-r--r--  kvm/vgabios/tests/lfbprof/Makefile | 5
-rw-r--r--  kvm/vgabios/tests/lfbprof/lfbprof.c | 594
-rw-r--r--  kvm/vgabios/tests/lfbprof/lfbprof.h | 149
-rw-r--r--  kvm/vgabios/tests/testbios.c | 353
-rw-r--r--  kvm/vgabios/vbe.c | 1432
-rw-r--r--  kvm/vgabios/vbe.h | 313
-rw-r--r--  kvm/vgabios/vbe_display_api.txt | 237
-rw-r--r--  kvm/vgabios/vbetables-gen.c | 264
-rw-r--r--  kvm/vgabios/vgabios.c | 3853
-rw-r--r--  kvm/vgabios/vgabios.h | 47
-rw-r--r--  kvm/vgabios/vgafonts.h | 784
-rw-r--r--  kvm/vgabios/vgatables.h | 622
-rw-r--r--  monitor.c | 33
-rw-r--r--  osdep.c | 4
-rw-r--r--  pc-bios/bios-vista.diff | 17
-rw-r--r--  pc-bios/bochs-manifest | 24
-rw-r--r--  pc-bios/openbios-sparc | bin 0 -> 506966 bytes
-rw-r--r--  pc-bios/optionrom/Makefile | 2
-rw-r--r--  pc-bios/optionrom/extboot.S | 695
-rw-r--r--  pc-bios/optionrom/vapic.S | 315
-rwxr-xr-x  pc-bios/vapic.bin | bin 0 -> 8960 bytes
-rw-r--r--  pc-bios/vgabios-cirrus.bin | bin 35840 -> 35840 bytes
-rw-r--r--  pc-bios/vgabios.bin | bin 38400 -> 39936 bytes
-rw-r--r--  posix-aio-compat.c | 67
-rw-r--r--  qemu-common.h | 8
-rw-r--r--  qemu-config.c | 4
-rw-r--r--  qemu-kvm-helper.c | 40
-rw-r--r--  qemu-kvm-ia64.c | 143
-rw-r--r--  qemu-kvm-x86.c | 1705
-rw-r--r--  qemu-kvm.c | 2669
-rw-r--r--  qemu-kvm.h | 1169
-rw-r--r--  qemu-lock.h | 4
-rw-r--r--  qemu-monitor.hx | 15
-rw-r--r--  qemu-options.hx | 32
-rw-r--r--  qemu/pc-bios/extboot.bin | bin 0 -> 1536 bytes
-rw-r--r--  savevm.c | 23
-rw-r--r--  sysemu.h | 6
-rw-r--r--  target-i386/cpu.h | 24
-rw-r--r--  target-i386/fake-exec.c | 50
-rw-r--r--  target-i386/helper.c | 43
-rw-r--r--  target-i386/kvm.c | 22
-rw-r--r--  target-i386/libkvm.h | 28
-rw-r--r--  target-i386/machine.c | 16
-rw-r--r--  target-ia64/cpu.h | 85
-rw-r--r--  target-ia64/exec.h | 61
-rw-r--r--  target-ia64/fake-exec.c | 50
-rw-r--r--  target-ia64/firmware.c | 715
-rw-r--r--  target-ia64/firmware.h | 62
-rw-r--r--  target-ia64/helper.c | 5
-rw-r--r--  target-ia64/libkvm.c | 82
-rw-r--r--  target-ia64/libkvm.h | 31
-rw-r--r--  target-ia64/machine.c | 35
-rw-r--r--  target-ia64/op.c | 22
-rw-r--r--  target-ia64/op_helper.c | 104
-rw-r--r--  target-ia64/translate.c | 39
-rw-r--r--  target-ppc/cpu.h | 10
-rw-r--r--  target-ppc/fake-exec.c | 104
-rw-r--r--  target-ppc/helper.c | 1
-rw-r--r--  target-ppc/libkvm.c | 102
-rw-r--r--  target-ppc/libkvm.h | 36
-rw-r--r--  target-ppc/machine.c | 1
-rw-r--r--  vl.c | 113
-rw-r--r--  vnc.h | 2
260 files changed, 44445 insertions(+), 180 deletions(-)
diff --git a/.gitignore b/.gitignore
index cdd6aad97..bbd4d1706 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,4 +48,5 @@ pc-bios/bios-pq/status
pc-bios/vgabios-pq/status
pc-bios/optionrom/multiboot.bin
pc-bios/optionrom/multiboot.raw
+pc-bios/optionrom/extboot.bin
.stgit-*
diff --git a/Makefile b/Makefile
index c1fa08cec..8e3498b70 100644
--- a/Makefile
+++ b/Makefile
@@ -63,6 +63,18 @@ config-host.h-timestamp: config-host.mak
SUBDIR_RULES=$(patsubst %,subdir-%, $(TARGET_DIRS))
+ifeq ($(KVM_KMOD),yes)
+
+.PHONY: kvm-kmod
+
+all: kvm-kmod
+
+kvm-kmod:
+ $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C kvm/kernel V="$(V)" )
+
+
+endif
+
subdir-%: $(GENERATED_HEADERS)
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $* V="$(V)" TARGET_DIR="$*/" all,)
@@ -94,6 +106,7 @@ block-obj-y = cutils.o cache-utils.o qemu-malloc.o qemu-option.o module.o
block-obj-y += nbd.o block.o aio.o aes.o osdep.o
block-obj-$(CONFIG_POSIX) += posix-aio-compat.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
+block-obj-$(CONFIG_POSIX) += compatfd.o
block-nested-y += cow.o qcow.o vdi.o vmdk.o cloop.o dmg.o bochs.o vpc.o vvfat.o
block-nested-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o
@@ -283,6 +296,8 @@ pxe-ne2k_pci.bin pxe-pcnet.bin \
pxe-rtl8139.bin pxe-virtio.bin \
bamboo.dtb petalogix-s3adsp1800.dtb \
multiboot.bin linuxboot.bin
+BLOBS += extboot.bin
+BLOBS += vapic.bin
else
BLOBS=
endif
@@ -305,7 +320,12 @@ endif
ifneq ($(BLOBS),)
$(INSTALL_DIR) "$(DESTDIR)$(datadir)"
set -e; for x in $(BLOBS); do \
+ if [ -f $(SRC_PATH)/pc-bios/$$x ];then \
$(INSTALL_DATA) $(SRC_PATH)/pc-bios/$$x "$(DESTDIR)$(datadir)"; \
+ fi \
+ ; if [ -f pc-bios/optionrom/$$x ];then \
+ $(INSTALL_DATA) pc-bios/optionrom/$$x "$(DESTDIR)$(datadir)"; \
+ fi \
done
endif
$(INSTALL_DIR) "$(DESTDIR)$(datadir)/keymaps"
@@ -315,6 +335,9 @@ endif
for d in $(TARGET_DIRS); do \
$(MAKE) -C $$d $@ || exit 1 ; \
done
+ifeq ($(KVM_KMOD),yes)
+ $(MAKE) -C kvm/kernel $@
+endif
# various test targets
test speed: all
@@ -435,6 +458,7 @@ tarbin:
$(datadir)/pxe-rtl8139.bin \
$(datadir)/pxe-pcnet.bin \
$(datadir)/pxe-e1000.bin \
+ $(datadir)/extboot.bin \
$(docdir)/qemu-doc.html \
$(docdir)/qemu-tech.html \
$(mandir)/man1/qemu.1 \
diff --git a/Makefile.hw b/Makefile.hw
index bd252f5ed..6f4dbc40d 100644
--- a/Makefile.hw
+++ b/Makefile.hw
@@ -25,7 +25,9 @@ obj-$(CONFIG_ESCC) += escc.o
# PCI watchdog devices
obj-y += wdt_i6300esb.o
-obj-y += msix.o
+# MSI-X depends on kvm for interrupt injection,
+# so it was moved from Makefile.hw to Makefile.target for now
+# obj-y += msix.o
# PCI network cards
obj-y += ne2000.o
diff --git a/Makefile.target b/Makefile.target
index 7c1f30c1a..6037fed49 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -30,6 +30,8 @@ LIBS+=-lm
kvm.o kvm-all.o: QEMU_CFLAGS+=$(KVM_CFLAGS)
+CFLAGS += $(KVM_CFLAGS)
+
config-target.h: config-target.h-timestamp
config-target.h-timestamp: config-target.mak
@@ -40,12 +42,18 @@ all: $(PROGS)
#########################################################
# cpu emulator library
-libobj-y = exec.o translate-all.o cpu-exec.o translate.o
-libobj-y += tcg/tcg.o
+libobj-y = exec.o cpu-exec.o
+libobj-$(CONFIG_NO_CPU_EMULATION) += fake-exec.o
+libobj-$(CONFIG_CPU_EMULATION) += translate-all.o translate.o
+libobj-$(CONFIG_CPU_EMULATION) += tcg/tcg.o
libobj-$(CONFIG_SOFTFLOAT) += fpu/softfloat.o
libobj-$(CONFIG_NOSOFTFLOAT) += fpu/softfloat-native.o
libobj-y += op_helper.o helper.o
libobj-$(CONFIG_NEED_MMU) += mmu.o
+
+libobj-$(CONFIG_KVM) += kvm-tpr-opt.o
+libobj-$(CONFIG_KVM) += qemu-kvm-helper.o
+
libobj-$(TARGET_ARM) += neon_helper.o iwmmxt_helper.o
libobj-$(TARGET_ALPHA) += alpha_palcode.o
@@ -82,6 +90,8 @@ op_helper.o cpu-exec.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
# cpu_signal_handler() in cpu-exec.c.
signal.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+qemu-kvm-helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+
#########################################################
# Linux user emulator target
@@ -158,6 +168,10 @@ obj-y = vl.o async.o monitor.o pci.o pci_host.o pcie_host.o machine.o gdbstub.o
# need to fix this properly
obj-y += virtio-blk.o virtio-balloon.o virtio-net.o virtio-console.o virtio-pci.o
obj-$(CONFIG_KVM) += kvm.o kvm-all.o
+# MSI-X depends on kvm for interrupt injection,
+# so it was moved from Makefile.hw to Makefile.target for now
+obj-y += msix.o
+
obj-$(CONFIG_ISA_MMIO) += isa_mmio.o
LIBS+=-lz
@@ -194,12 +208,25 @@ obj-i386-y += fdc.o mc146818rtc.o serial.o i8259.o i8254.o pcspk.o pc.o
obj-i386-y += cirrus_vga.o apic.o ioapic.o parallel.o acpi.o piix_pci.o
obj-i386-y += usb-uhci.o vmmouse.o vmport.o vmware_vga.o hpet.o
obj-i386-y += device-hotplug.o pci-hotplug.o smbios.o wdt_ib700.o
+obj-i386-y += extboot.o
obj-i386-y += ne2000-isa.o
+obj-i386-y += testdev.o
+
+obj-i386-$(CONFIG_KVM_PIT) += i8254-kvm.o
+obj-i386-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += device-assignment.o
+
+# Hardware support
+obj-ia64-y += ide.o pckbd.o vga.o $(SOUND_HW) dma.o $(AUDIODRV)
+obj-ia64-y += fdc.o mc146818rtc.o serial.o i8259.o ipf.o
+obj-ia64-y += cirrus_vga.o parallel.o acpi.o piix_pci.o
+obj-ia64-y += usb-uhci.o
+obj-ia64-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += device-assignment.o
# shared objects
obj-ppc-y = ppc.o ide/core.o ide/qdev.o ide/isa.o ide/pci.o ide/macio.o
obj-ppc-y += ide/cmd646.o
obj-ppc-y += vga.o vga-pci.o $(sound-obj-y) dma.o openpic.o
+obj-ppc-y += cirrus_vga.o
# PREP target
obj-ppc-y += pckbd.o serial.o i8259.o i8254.o fdc.o mc146818rtc.o
obj-ppc-y += prep_pci.o ppc_prep.o ne2000-isa.o
@@ -295,6 +322,11 @@ obj-m68k-y += m68k-semi.o dummy_m68k.o
obj-s390x-y = s390-virtio-bus.o s390-virtio.o
+ifeq ($(TARGET_ARCH), ia64)
+firmware.o: firmware.c
+ $(CC) $(HELPER_CFLAGS) $(CPPFLAGS) $(BASE_CFLAGS) -c -o $@ $<
+endif
+
main.o vl.o: QEMU_CFLAGS+=$(GPROF_CFLAGS)
vl.o: QEMU_CFLAGS+=$(SDL_CFLAGS)
diff --git a/block/raw-posix.c b/block/raw-posix.c
index 248e34cba..c204cf94e 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -27,6 +27,8 @@
#include "qemu-log.h"
#include "block_int.h"
#include "module.h"
+#include "compatfd.h"
+#include <assert.h>
#include "block/raw-posix-aio.h"
#ifdef CONFIG_COCOA
diff --git a/cache-utils.h b/cache-utils.h
index b45fde44e..e4f27ef51 100644
--- a/cache-utils.h
+++ b/cache-utils.h
@@ -34,7 +34,28 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
asm volatile ("isync" : : : "memory");
}
+/*
+ * Is this correct for PPC?
+ */
+static inline void dma_flush_range(unsigned long start, unsigned long stop)
+{
+}
+
+#elif defined(__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ while (start < stop) {
+ asm volatile ("fc %0" :: "r"(start));
+ start += 32;
+ }
+ asm volatile (";;sync.i;;srlz.i;;");
+}
+#define dma_flush_range(start, end) flush_icache_range(start, end)
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
#else
+static inline void dma_flush_range(unsigned long start, unsigned long stop)
+{
+}
#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
#endif
diff --git a/compat/sys/eventfd.h b/compat/sys/eventfd.h
new file mode 100644
index 000000000..f55d96adb
--- /dev/null
+++ b/compat/sys/eventfd.h
@@ -0,0 +1,13 @@
+#ifndef _COMPAT_SYS_EVENTFD
+#define _COMPAT_SYS_EVENTFD
+
+#include <unistd.h>
+#include <syscall.h>
+
+
+static inline int eventfd (int count, int flags)
+{
+ return syscall(SYS_eventfd, count, flags);
+}
+
+#endif
diff --git a/compatfd.c b/compatfd.c
new file mode 100644
index 000000000..4a00d95a3
--- /dev/null
+++ b/compatfd.c
@@ -0,0 +1,143 @@
+/*
+ * signalfd/eventfd compatibility
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu-common.h"
+#include "compatfd.h"
+
+#include <sys/syscall.h>
+#include <pthread.h>
+
+struct sigfd_compat_info
+{
+ sigset_t mask;
+ int fd;
+};
+
+static void *sigwait_compat(void *opaque)
+{
+ struct sigfd_compat_info *info = opaque;
+ int err;
+ sigset_t all;
+
+ sigfillset(&all);
+ sigprocmask(SIG_BLOCK, &all, NULL);
+
+ do {
+ siginfo_t siginfo;
+
+ err = sigwaitinfo(&info->mask, &siginfo);
+ if (err == -1 && errno == EINTR) {
+ err = 0;
+ continue;
+ }
+
+ if (err > 0) {
+ char buffer[128];
+ size_t offset = 0;
+
+ memcpy(buffer, &err, sizeof(err));
+ while (offset < sizeof(buffer)) {
+ ssize_t len;
+
+ len = write(info->fd, buffer + offset,
+ sizeof(buffer) - offset);
+ if (len == -1 && errno == EINTR)
+ continue;
+
+ if (len <= 0) {
+ err = -1;
+ break;
+ }
+
+ offset += len;
+ }
+ }
+ } while (err >= 0);
+
+ return NULL;
+}
+
+static int qemu_signalfd_compat(const sigset_t *mask)
+{
+ pthread_attr_t attr;
+ pthread_t tid;
+ struct sigfd_compat_info *info;
+ int fds[2];
+
+ info = malloc(sizeof(*info));
+ if (info == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ if (pipe(fds) == -1) {
+ free(info);
+ return -1;
+ }
+
+ qemu_set_cloexec(fds[0]);
+ qemu_set_cloexec(fds[1]);
+
+ memcpy(&info->mask, mask, sizeof(*mask));
+ info->fd = fds[1];
+
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+
+ pthread_create(&tid, &attr, sigwait_compat, info);
+
+ pthread_attr_destroy(&attr);
+
+ return fds[0];
+}
+
+int qemu_signalfd(const sigset_t *mask)
+{
+#if defined(CONFIG_SIGNALFD)
+ int ret;
+
+ ret = syscall(SYS_signalfd, -1, mask, _NSIG / 8);
+ if (ret != -1) {
+ qemu_set_cloexec(ret);
+ return ret;
+ }
+#endif
+
+ return qemu_signalfd_compat(mask);
+}
+
+int qemu_eventfd(int *fds)
+{
+ int ret;
+
+#if defined(CONFIG_EVENTFD)
+ ret = syscall(SYS_eventfd, 0);
+ if (ret >= 0) {
+ fds[0] = ret;
+ qemu_set_cloexec(ret);
+ if ((fds[1] = dup(ret)) == -1) {
+ close(ret);
+ return -1;
+ }
+ qemu_set_cloexec(fds[1]);
+ return 0;
+ }
+#endif
+
+ ret = pipe(fds);
+ if (ret != -1) {
+ qemu_set_cloexec(fds[0]);
+ qemu_set_cloexec(fds[1]);
+ }
+ return ret;
+}
diff --git a/compatfd.h b/compatfd.h
new file mode 100644
index 000000000..06b0b6ba5
--- /dev/null
+++ b/compatfd.h
@@ -0,0 +1,45 @@
+/*
+ * signalfd/eventfd compatibility
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_COMPATFD_H
+#define QEMU_COMPATFD_H
+
+#include <signal.h>
+
+struct qemu_signalfd_siginfo {
+ uint32_t ssi_signo; /* Signal number */
+ int32_t ssi_errno; /* Error number (unused) */
+ int32_t ssi_code; /* Signal code */
+ uint32_t ssi_pid; /* PID of sender */
+ uint32_t ssi_uid; /* Real UID of sender */
+ int32_t ssi_fd; /* File descriptor (SIGIO) */
+ uint32_t ssi_tid; /* Kernel timer ID (POSIX timers) */
+ uint32_t ssi_band; /* Band event (SIGIO) */
+ uint32_t ssi_overrun; /* POSIX timer overrun count */
+ uint32_t ssi_trapno; /* Trap number that caused signal */
+ int32_t ssi_status; /* Exit status or signal (SIGCHLD) */
+ int32_t ssi_int; /* Integer sent by sigqueue(2) */
+ uint64_t ssi_ptr; /* Pointer sent by sigqueue(2) */
+ uint64_t ssi_utime; /* User CPU time consumed (SIGCHLD) */
+ uint64_t ssi_stime; /* System CPU time consumed (SIGCHLD) */
+ uint64_t ssi_addr; /* Address that generated signal
+ (for hardware-generated signals) */
+ uint8_t pad[48]; /* Pad size to 128 bytes (allow for
+ additional fields in the future) */
+};
+
+int qemu_signalfd(const sigset_t *mask);
+
+int qemu_eventfd(int *fds);
+
+#endif
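
For orientation, a minimal caller sketch (not part of the patch; the function below is hypothetical) of the qemu_signalfd() interface introduced above. Whether the kernel signalfd syscall or the pipe-based fallback in compatfd.c sits behind the descriptor, each read() delivers one 128-byte qemu_signalfd_siginfo record; in the fallback path only ssi_signo is filled in.

/* hypothetical caller: block SIGUSR1 and receive it through the fd */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include "compatfd.h"

static int watch_sigusr1(void)
{
    sigset_t mask;
    int fd;

    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);
    sigprocmask(SIG_BLOCK, &mask, NULL);    /* signal must stay blocked */

    fd = qemu_signalfd(&mask);
    if (fd < 0)
        return -1;

    for (;;) {
        struct qemu_signalfd_siginfo info;
        ssize_t len = read(fd, &info, sizeof(info));

        if (len != sizeof(info))
            break;                          /* EOF or error */
        printf("caught signal %u\n", info.ssi_signo);
    }
    return 0;
}
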
diff --git a/configure b/configure
index 5f463b05b..88ba00285 100755
--- a/configure
+++ b/configure
@@ -160,7 +160,7 @@ else
cpu=`uname -m`
fi
-target_list=""
+target_list="x86_64-softmmu"
case "$cpu" in
alpha|cris|ia64|m68k|microblaze|ppc|ppc64|sparc64)
cpu="$cpu"
@@ -197,6 +197,16 @@ case "$cpu" in
;;
esac
+kvm_version() {
+ local fname="$(dirname "$0")/KVM_VERSION"
+
+ if test -f "$fname"; then
+ cat "$fname"
+ else
+ echo "qemu-kvm-devel"
+ fi
+}
+
# Default value for a variable defining feature "foo".
# * foo="no" feature will only be used if --enable-foo arg is given
# * foo="" feature will be searched for, and if found, will be used
@@ -250,10 +260,15 @@ guest_base=""
uname_release=""
io_thread="no"
mixemu="no"
+kvm_trace="no"
+kvm_cap_pit=""
+kvm_cap_device_assignment=""
kerneldir=""
aix="no"
blobs="yes"
-pkgversion=""
+pkgversion=" ($(kvm_version))"
+cpu_emulation="yes"
+kvm_kmod="no"
check_utests="no"
user_pie="no"
zero_malloc=""
@@ -389,6 +404,13 @@ AIX)
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
audio_possible_drivers="$audio_possible_drivers fmod"
fi
+ if [ "$cpu" = "ia64" ] ; then
+ xen="no"
+ target_list="ia64-softmmu"
+ cpu_emulation="no"
+ gdbstub="no"
+ slirp="no"
+ fi
;;
esac
@@ -518,6 +540,14 @@ for opt do
;;
--enable-kvm) kvm="yes"
;;
+ --disable-kvm-cap-pit) kvm_cap_pit="no"
+ ;;
+ --enable-kvm-cap-pit) kvm_cap_pit="yes"
+ ;;
+ --disable-kvm-cap-device-assignment) kvm_cap_device_assignment="no"
+ ;;
+ --enable-kvm-cap-device-assignment) kvm_cap_device_assignment="yes"
+ ;;
--enable-profiler) profiler="yes"
;;
--enable-cocoa)
@@ -595,12 +625,16 @@ for opt do
;;
--kerneldir=*) kerneldir="$optarg"
;;
+ --with-kvm-trace) kvm_trace="yes"
+ ;;
--with-pkgversion=*) pkgversion=" ($optarg)"
;;
--disable-docs) docs="no"
;;
--enable-docs) docs="yes"
;;
+ --disable-cpu-emulation) cpu_emulation="no"
+ ;;
*) echo "ERROR: unknown option $opt"; show_help="yes"
;;
esac
@@ -722,6 +756,10 @@ echo " --disable-bluez disable bluez stack connectivity"
echo " --enable-bluez enable bluez stack connectivity"
echo " --disable-kvm disable KVM acceleration support"
echo " --enable-kvm enable KVM acceleration support"
+echo " --disable-kvm-cap-pit disable KVM PIT support"
+echo " --enable-kvm-cap-pit enable KVM PIT support"
+echo " --disable-kvm-cap-device-assignment disable KVM device assignment support"
+echo " --enable-kvm-cap-device-assignment enable KVM device assignment support"
echo " --disable-nptl disable usermode NPTL support"
echo " --enable-nptl enable usermode NPTL support"
echo " --enable-system enable all system emulation targets"
@@ -753,6 +791,8 @@ echo " --enable-linux-aio enable Linux AIO support"
echo " --enable-io-thread enable IO thread"
echo " --disable-blobs disable installing provided firmware blobs"
echo " --kerneldir=PATH look for kernel includes in PATH"
+echo " --with-kvm-trace enable building the KVM module with the kvm trace option"
+echo " --disable-cpu-emulation disable use of qemu cpu emulation code"
echo ""
echo "NOTE: The object files are built at the place where configure is launched"
exit 1
@@ -1384,8 +1424,22 @@ EOF
kvm_cflags="$kvm_cflags -I$kerneldir/arch/$cpu/include"
fi
else
- kvm_cflags=""
+ case "$cpu" in
+ i386 | x86_64)
+ kvm_arch="x86"
+ ;;
+ ppc)
+ kvm_arch="powerpc"
+ ;;
+ *)
+ kvm_arch="$cpu"
+ ;;
+ esac
+ kvm_cflags="-I$source_path/kvm/include"
+ kvm_cflags="$kvm_cflags -include $source_path/kvm/include/linux/config.h"
+ kvm_cflags="$kvm_cflags -I$source_path/kvm/include/$kvm_arch"
fi
+ kvm_cflags="$kvm_cflags -idirafter $source_path/compat"
if compile_prog "$kvm_cflags" "" ; then
kvm=yes
else
@@ -1408,6 +1462,75 @@ EOF
fi
##########################################
+# test for KVM_CAP_PIT
+
+if test "$kvm_cap_pit" != "no" ; then
+ if test "$kvm" = "no" -a "$kvm_cap_pit" = "yes" ; then
+ feature_not_found "kvm_cap_pit (kvm is not enabled)"
+ fi
+ cat > $TMPC <<EOF
+#include <linux/kvm.h>
+#ifndef KVM_CAP_PIT
+#error "kvm no pit capability"
+#endif
+int main(void) { return 0; }
+EOF
+ if compile_prog "$kvm_cflags" ""; then
+ kvm_cap_pit=yes
+ else
+ if test "$kvm_cap_pit" = "yes" ; then
+ feature_not_found "kvm_cap_pit"
+ fi
+ kvm_cap_pit=no
+ fi
+fi
+
+##########################################
+# test for KVM_CAP_DEVICE_ASSIGNMENT
+
+if test "$kvm_cap_device_assignment" != "no" ; then
+ if test "$kvm" = "no" -a "$kvm_cap_device_assignment" = "yes" ; then
+ feature_not_found "kvm_cap_device_assignment (kvm is not enabled)"
+ fi
+ cat > $TMPC <<EOF
+#include <linux/kvm.h>
+#ifndef KVM_CAP_DEVICE_ASSIGNMENT
+#error "kvm no device assignment capability"
+#endif
+int main(void) { return 0; }
+EOF
+ if compile_prog "$kvm_cflags" "" ; then
+ kvm_cap_device_assignment=yes
+ else
+ if test "$kvm_cap_device_assignment" = "yes" ; then
+ feature_not_found "kvm_cap_device_assignment"
+ fi
+ kvm_cap_device_assignment=no
+ fi
+fi
+
+##########################################
+# libpci probe for kvm_cap_device_assignment
+if test $kvm_cap_device_assignment = "yes" ; then
+ cat > $TMPC << EOF
+#include <pci/pci.h>
+#ifndef PCI_VENDOR_ID
+#error NO LIBPCI
+#endif
+int main(void) { struct pci_access a; pci_init(&a); return 0; }
+EOF
+ if compile_prog "" "-lpci -lz" ; then
+ libs_softmmu="-lpci -lz $libs_softmmu"
+ else
+ echo
+ echo "Error: libpci check failed"
+ echo "Disabling KVM device assignment capability."
+ echo
+ kvm_cap_device_assignment=no
+ fi
+fi
+
+##########################################
# pthread probe
PTHREADLIBS_LIST="-lpthread -lpthreadGC2"
@@ -1613,6 +1736,21 @@ if compile_prog "" "" ; then
splice=yes
fi
+##########################################
+# signalfd probe
+signalfd="no"
+cat > $TMPC << EOF
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <signal.h>
+int main(void) { return syscall(SYS_signalfd, -1, NULL, _NSIG / 8); }
+EOF
+
+if $cc $ARCH_CFLAGS -o $TMPE $TMPC 2> /dev/null ; then
+ signalfd=yes
+fi
+
# check if eventfd is supported
eventfd=no
cat > $TMPC << EOF
@@ -1842,6 +1980,20 @@ else
binsuffix="/bin"
fi
+if test -f kvm/kernel/configure; then
+ kvm_kmod="yes"
+ kmod_args=""
+ if test -n "$kerneldir"; then
+ kmod_args="--kerneldir=$kerneldir"
+ fi
+ if test "$kvm_trace" = "yes"; then
+ kmod_args="$kmod_args --with-kvm-trace"
+ fi
+ # hope there are no spaces in kmod_args; can't use arrays because of
+ # dash.
+ (cd kvm/kernel; ./configure $kmod_args)
+fi
+
echo "Install prefix $prefix"
echo "BIOS directory $prefix$datasuffix"
echo "binary directory $prefix$binsuffix"
@@ -1885,6 +2037,7 @@ if test -n "$sparc_cpu"; then
echo "Target Sparc Arch $sparc_cpu"
fi
echo "xen support $xen"
+echo "CPU emulation $cpu_emulation"
echo "brlapi support $brlapi"
echo "bluez support $bluez"
echo "Documentation $docs"
@@ -1898,6 +2051,9 @@ echo "IO thread $io_thread"
echo "Linux AIO support $linux_aio"
echo "Install blobs $blobs"
echo "KVM support $kvm"
+echo "KVM PIT support $kvm_cap_pit"
+echo "KVM device assig. $kvm_cap_device_assignment"
+echo "KVM trace support $kvm_trace"
echo "fdt support $fdt"
echo "preadv support $preadv"
echo "fdatasync $fdatasync"
@@ -2104,6 +2260,9 @@ fi
if test "$fdt" = "yes" ; then
echo "CONFIG_FDT=y" >> $config_host_mak
fi
+if test "$signalfd" = "yes" ; then
+ echo "CONFIG_SIGNALFD=y" >> $config_host_mak
+fi
if test "$need_offsetof" = "yes" ; then
echo "CONFIG_NEED_OFFSETOF=y" >> $config_host_mak
fi
@@ -2113,6 +2272,11 @@ fi
if test "$fdatasync" = "yes" ; then
echo "CONFIG_FDATASYNC=y" >> $config_host_mak
fi
+if test $cpu_emulation = "yes"; then
+ echo "CONFIG_CPU_EMULATION=y" >> $config_host_mak
+else
+ echo "CONFIG_NO_CPU_EMULATION=y" >> $config_host_mak
+fi
# XXX: suppress that
if [ "$bsd" = "yes" ] ; then
@@ -2138,6 +2302,8 @@ bsd)
;;
esac
+echo "KVM_KMOD=$kvm_kmod" >> $config_host_mak
+
tools=
if test `expr "$target_list" : ".*softmmu.*"` != 0 ; then
tools="qemu-img\$(EXESUF) qemu-io\$(EXESUF) $tools"
@@ -2288,6 +2454,9 @@ case "$target_arch2" in
TARGET_BASE_ARCH=i386
target_phys_bits=64
;;
+ ia64)
+ target_phys_bits=64
+ ;;
alpha)
target_phys_bits=64
;;
@@ -2418,6 +2587,12 @@ case "$target_arch2" in
\( "$target_arch2" = "i386" -a "$cpu" = "x86_64" \) \) ; then
echo "CONFIG_KVM=y" >> $config_target_mak
echo "KVM_CFLAGS=$kvm_cflags" >> $config_target_mak
+ if test $kvm_cap_pit = "yes" ; then
+ echo "CONFIG_KVM_PIT=y" >> $config_target_mak
+ fi
+ if test $kvm_cap_device_assignment = "yes" ; then
+ echo "CONFIG_KVM_DEVICE_ASSIGNMENT=y" >> $config_target_mak
+ fi
fi
esac
echo "TARGET_PHYS_ADDR_BITS=$target_phys_bits" >> $config_target_mak
@@ -2626,7 +2801,7 @@ if test "$source_path_used" = "yes" ; then
fi
# temporary config to build submodules
-for rom in seabios vgabios ; do
+for rom in seabios vgabios; do
config_mak=roms/$rom/config.mak
echo "# Automatically generated by configure - do not modify" >> $config_mak
echo "SRC_PATH=$source_path/roms/$rom" >> $config_mak
diff --git a/cpu-all.h b/cpu-all.h
index 57b69f886..8ed76c74d 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -849,6 +849,7 @@ extern int phys_ram_fd;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
extern ram_addr_t last_ram_offset;
+extern uint8_t *bios_mem;
/* physical memory access */
diff --git a/cpu-common.h b/cpu-common.h
index 630237203..5e5956489 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -34,6 +34,7 @@ void qemu_ram_free(ram_addr_t addr);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
/* This should not be used by devices. */
+int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host(void *ptr);
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
diff --git a/cpu-defs.h b/cpu-defs.h
index 95068b530..cf502e992 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -27,6 +27,7 @@
#include <setjmp.h>
#include <inttypes.h>
#include <signal.h>
+#include <pthread.h>
#include "osdep.h"
#include "qemu-queue.h"
#include "targphys.h"
@@ -134,6 +135,16 @@ typedef struct CPUWatchpoint {
QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
+/* forward declaration */
+struct qemu_work_item;
+
+struct KVMCPUState {
+ pthread_t thread;
+ int signalled;
+ struct qemu_work_item *queued_work_first, *queued_work_last;
+ int regs_modified;
+};
+
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
struct TranslationBlock *current_tb; /* currently executing TB */ \
@@ -146,8 +157,6 @@ typedef struct CPUWatchpoint {
target_ulong mem_io_vaddr; /* target virtual addr at which the \
memory was accessed */ \
uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
- uint32_t stop; /* Stop request */ \
- uint32_t stopped; /* Artificially stopped */ \
uint32_t interrupt_request; \
volatile sig_atomic_t exit_request; \
/* The meaning of the MMU modes is defined in the target code. */ \
@@ -188,6 +197,7 @@ typedef struct CPUWatchpoint {
int nr_cores; /* number of cores within this CPU package */ \
int nr_threads;/* number of threads within this CPU */ \
int running; /* Nonzero if cpu is currently running(usermode). */ \
+ int thread_id; \
/* user data */ \
void *opaque; \
\
@@ -197,6 +207,9 @@ typedef struct CPUWatchpoint {
const char *cpu_model_str; \
struct KVMState *kvm_state; \
struct kvm_run *kvm_run; \
- int kvm_fd;
+ int kvm_fd; \
+ uint32_t stop; /* Stop request */ \
+ uint32_t stopped; /* Artificially stopped */ \
+ struct KVMCPUState kvm_cpu_state;
#endif
diff --git a/cpu-exec.c b/cpu-exec.c
index 3246c9e2c..040d4741b 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -19,7 +19,9 @@
#include "config.h"
#include "exec.h"
#include "disas.h"
+#if !defined(TARGET_IA64)
#include "tcg.h"
+#endif
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
@@ -38,6 +40,8 @@
#endif
#endif
+#include "qemu-kvm.h"
+
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
@@ -252,6 +256,7 @@ int cpu_exec(CPUState *env1)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
+#elif defined(TARGET_IA64)
/* XXXXX */
#else
#error unsupported target CPU
@@ -319,6 +324,8 @@ int cpu_exec(CPUState *env1)
do_interrupt(env);
#elif defined(TARGET_M68K)
do_interrupt(0);
+#elif defined(TARGET_IA64)
+ do_interrupt(env);
#endif
#endif
}
@@ -674,6 +681,7 @@ int cpu_exec(CPUState *env1)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
+#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
diff --git a/cutils.c b/cutils.c
index 2365e68d9..be99b2167 100644
--- a/cutils.c
+++ b/cutils.c
@@ -218,6 +218,11 @@ void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
}
}
+/*
+ * No dma flushing needed here, as the aio code will call dma_bdrv_cb()
+ * on completion as well, which will result in a call to
+ * dma_bdrv_unmap(), which will do the flushing.
+ */
void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
{
const uint8_t *p = (const uint8_t *)buf;
diff --git a/dma-helpers.c b/dma-helpers.c
index 712ed897f..d4fc077c0 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -160,6 +160,10 @@ static BlockDriverAIOCB *dma_bdrv_io(
dbs->is_write = is_write;
dbs->bh = NULL;
qemu_iovec_init(&dbs->iov, sg->nsg);
+ /*
+ * DMA flushing is handled in dma_bdrv_cb() calling dma_bdrv_unmap()
+ * so we don't need to do that here.
+ */
dma_bdrv_cb(dbs, 0);
if (!dbs->acb) {
qemu_aio_release(dbs);
diff --git a/exec.c b/exec.c
index 5969eb27c..4bae1d9ef 100644
--- a/exec.c
+++ b/exec.c
@@ -34,7 +34,13 @@
#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
+#include "cache-utils.h"
+
+#if !defined(TARGET_IA64)
#include "tcg.h"
+#endif
+#include "qemu-kvm.h"
+
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
@@ -74,6 +80,8 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#elif defined(TARGET_IA64)
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
@@ -111,6 +119,7 @@ uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
+uint8_t *bios_mem;
static int in_migration;
typedef struct RAMBlock {
@@ -412,6 +421,9 @@ static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
static void code_gen_alloc(unsigned long tb_size)
{
+ if (kvm_enabled())
+ return;
+
#ifdef USE_STATIC_CODE_GEN_BUFFER
code_gen_buffer = static_code_gen_buffer;
code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
@@ -588,6 +600,11 @@ void cpu_exec_init(CPUState *env)
env->numa_node = 0;
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
+#ifdef __WIN32
+ env->thread_id = GetCurrentProcessId();
+#else
+ env->thread_id = getpid();
+#endif
*penv = env;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
@@ -1553,6 +1570,8 @@ void cpu_interrupt(CPUState *env, int mask)
old_mask = env->interrupt_request;
env->interrupt_request |= mask;
+ if (kvm_enabled() && !kvm_irqchip_in_kernel())
+ kvm_update_interrupt_request(env);
#ifndef CONFIG_USER_ONLY
/*
@@ -1880,7 +1899,6 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int cpu_physical_memory_set_dirty_tracking(int enable)
{
- in_migration = enable;
if (kvm_enabled()) {
return kvm_set_migration_log(enable);
}
@@ -2404,6 +2422,113 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
kvm_uncoalesce_mmio_region(addr, size);
}
+#ifdef __linux__
+
+#include <sys/vfs.h>
+
+#define HUGETLBFS_MAGIC 0x958458f6
+
+static long gethugepagesize(const char *path)
+{
+ struct statfs fs;
+ int ret;
+
+ do {
+ ret = statfs(path, &fs);
+ } while (ret != 0 && errno == EINTR);
+
+ if (ret != 0) {
+ perror("statfs");
+ return 0;
+ }
+
+ if (fs.f_type != HUGETLBFS_MAGIC)
+ fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
+
+ return fs.f_bsize;
+}
+
+static void *file_ram_alloc(ram_addr_t memory, const char *path)
+{
+ char *filename;
+ void *area;
+ int fd;
+#ifdef MAP_POPULATE
+ int flags;
+#endif
+ unsigned long hpagesize;
+ extern int mem_prealloc;
+
+ if (!path) {
+ return NULL;
+ }
+
+ hpagesize = gethugepagesize(path);
+ if (!hpagesize) {
+ return NULL;
+ }
+
+ if (memory < hpagesize) {
+ return NULL;
+ }
+
+ if (kvm_enabled() && !kvm_has_sync_mmu()) {
+ fprintf(stderr, "host lacks mmu notifiers, disabling --mem-path\n");
+ return NULL;
+ }
+
+ if (asprintf(&filename, "%s/kvm.XXXXXX", path) == -1) {
+ return NULL;
+ }
+
+ fd = mkstemp(filename);
+ if (fd < 0) {
+ perror("mkstemp");
+ free(filename);
+ return NULL;
+ }
+ unlink(filename);
+ free(filename);
+
+ memory = (memory+hpagesize-1) & ~(hpagesize-1);
+
+ /*
+ * ftruncate is not supported by hugetlbfs in older
+ * hosts, so don't bother checking for errors.
+ * If anything goes wrong with it under other filesystems,
+ * mmap will fail.
+ */
+ ftruncate(fd, memory);
+
+#ifdef MAP_POPULATE
+ /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
+ * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
+ * to sidestep this quirk.
+ */
+ flags = mem_prealloc ? MAP_POPULATE|MAP_SHARED : MAP_PRIVATE;
+ area = mmap(0, memory, PROT_READ|PROT_WRITE, flags, fd, 0);
+#else
+ area = mmap(0, memory, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
+#endif
+ if (area == MAP_FAILED) {
+ perror("alloc_mem_area: can't mmap hugetlbfs pages");
+ close(fd);
+ return (NULL);
+ }
+ return area;
+}
+
+#else
+
+static void *file_ram_alloc(ram_addr_t memory, const char *path)
+{
+ return NULL;
+}
+
+#endif
+
+extern const char *mem_path;
+
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
RAMBlock *new_block;
@@ -2411,16 +2536,20 @@ ram_addr_t qemu_ram_alloc(ram_addr_t size)
size = TARGET_PAGE_ALIGN(size);
new_block = qemu_malloc(sizeof(*new_block));
+ new_block->host = file_ram_alloc(size, mem_path);
+ if (!new_block->host) {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
/* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
- new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ new_block->host = mmap((void*)0x1000000, size,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
- new_block->host = qemu_vmalloc(size);
+ new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
- madvise(new_block->host, size, MADV_MERGEABLE);
+ madvise(new_block->host, size, MADV_MERGEABLE);
#endif
+ }
new_block->offset = last_ram_offset;
new_block->length = size;
@@ -2482,9 +2611,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
return block->host + (addr - block->offset);
}
-/* Some of the softmmu routines need to translate from a host pointer
- (typically a TLB entry) back to a ram offset. */
-ram_addr_t qemu_ram_addr_from_host(void *ptr)
+int do_qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
RAMBlock *prev;
RAMBlock **prevp;
@@ -2501,11 +2628,23 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
prev = block;
block = block->next;
}
- if (!block) {
+ if (!block)
+ return -1;
+ *ram_addr = block->offset + (host - block->host);
+ return 0;
+}
+
+/* Some of the softmmu routines need to translate from a host pointer
+ (typically a TLB entry) back to a ram offset. */
+ram_addr_t qemu_ram_addr_from_host(void *ptr)
+{
+ ram_addr_t ram_addr;
+
+ if (do_qemu_ram_addr_from_host(ptr, &ram_addr)) {
fprintf(stderr, "Bad ram pointer %p\n", ptr);
abort();
}
- return block->offset + (host - block->host);
+ return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
@@ -3091,6 +3230,11 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
(0xff & ~CODE_DIRTY_FLAG);
}
+ /* qemu doesn't execute guest code directly, but kvm does;
+ therefore flush the instruction caches */
+ if (kvm_enabled())
+ flush_icache_range((unsigned long)ptr,
+ ((unsigned long)ptr)+l);
}
} else {
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3283,6 +3427,8 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
int is_write, target_phys_addr_t access_len)
{
+ unsigned long flush_len = (unsigned long)access_len;
+
if (buffer != bounce.buffer) {
if (is_write) {
ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
@@ -3300,7 +3446,9 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
}
addr1 += l;
access_len -= l;
- }
+ }
+ dma_flush_range((unsigned long)buffer,
+ (unsigned long)buffer + flush_len);
}
return;
}
@@ -3668,7 +3816,9 @@ void dump_exec_info(FILE *f,
cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
+#ifdef CONFIG_PROFILER
tcg_dump_info(f, cpu_fprintf);
+#endif
}
#if !defined(CONFIG_USER_ONLY)
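
The file_ram_alloc() helper added to exec.c above rounds the requested RAM size up to a whole number of huge pages before mapping the hugetlbfs file. A standalone illustration of that rounding (not part of the patch; the page and memory sizes are arbitrary examples):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t hpagesize = 2ULL << 20;            /* e.g. 2 MiB huge pages */
    uint64_t memory = 1001ULL << 20;            /* requested: 1001 MiB */

    /* same expression as in file_ram_alloc() */
    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
    assert(memory == 1002ULL << 20);            /* rounded up to 1002 MiB */
    return 0;
}
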
diff --git a/fpu/softfloat-native.c b/fpu/softfloat-native.c
index 8d64f4eff..cb0e97be2 100644
--- a/fpu/softfloat-native.c
+++ b/fpu/softfloat-native.c
@@ -5,6 +5,7 @@
#if defined(CONFIG_SOLARIS)
#include <fenv.h>
#endif
+#include "config-host.h"
void set_float_rounding_mode(int val STATUS_PARAM)
{
diff --git a/gdbstub.c b/gdbstub.c
index 5320b1c14..0a738bf99 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -34,6 +34,7 @@
#include "sysemu.h"
#include "gdbstub.h"
#endif
+#include "qemu-kvm.h"
#define MAX_PACKET_LENGTH 4096
diff --git a/hw/acpi.c b/hw/acpi.c
index 9a69e7df1..ec049a2e8 100644
--- a/hw/acpi.c
+++ b/hw/acpi.c
@@ -23,6 +23,8 @@
#include "i2c.h"
#include "smbus.h"
#include "kvm.h"
+#include "qemu-kvm.h"
+#include "string.h"
//#define DEBUG
@@ -521,6 +523,13 @@ i2c_bus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
pci_conf[0x40] = 0x01; /* PM io base read only bit */
+#if defined(TARGET_IA64)
+ pci_conf[0x40] = 0x41; /* PM io base read only bit */
+ pci_conf[0x41] = 0x1f;
+ pm_write_config(s, 0x80, 0x01, 1); /*Set default pm_io_base 0x1f40*/
+ s->pmcntrl = SCI_EN;
+#endif
+
register_ioport_write(0xb2, 2, 1, pm_smi_writeb, s);
register_ioport_read(0xb2, 2, 1, pm_smi_readb, s);
@@ -559,12 +568,14 @@ i2c_bus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
}
#define GPE_BASE 0xafe0
+#define PROC_BASE 0xaf00
#define PCI_BASE 0xae00
#define PCI_EJ_BASE 0xae08
struct gpe_regs {
uint16_t sts; /* status */
uint16_t en; /* enabled */
+ uint8_t cpus_sts[32];
};
struct pci_status {
@@ -587,6 +598,10 @@ static uint32_t gpe_readb(void *opaque, uint32_t addr)
uint32_t val = 0;
struct gpe_regs *g = opaque;
switch (addr) {
+ case PROC_BASE ... PROC_BASE+31:
+ val = g->cpus_sts[addr - PROC_BASE];
+ break;
+
case GPE_BASE:
case GPE_BASE + 1:
val = gpe_read_val(g->sts, addr);
@@ -629,6 +644,10 @@ static void gpe_writeb(void *opaque, uint32_t addr, uint32_t val)
{
struct gpe_regs *g = opaque;
switch (addr) {
+ case PROC_BASE ... PROC_BASE + 31:
+ /* don't allow the guest to change cpus_sts */
+ break;
+
case GPE_BASE:
case GPE_BASE + 1:
gpe_reset_val(&g->sts, addr, val);
@@ -712,22 +731,72 @@ static void pciej_write(void *opaque, uint32_t addr, uint32_t val)
#endif
}
+static const char *model;
+
static int piix4_device_hotplug(PCIDevice *dev, int state);
-void piix4_acpi_system_hot_add_init(PCIBus *bus)
+void piix4_acpi_system_hot_add_init(PCIBus *bus, const char *cpu_model)
{
+ int i = 0, cpus = smp_cpus;
+
+ while (cpus > 0) {
+ gpe.cpus_sts[i++] = (cpus < 8) ? (1 << cpus) - 1 : 0xff;
+ cpus -= 8;
+ }
register_ioport_write(GPE_BASE, 4, 1, gpe_writeb, &gpe);
register_ioport_read(GPE_BASE, 4, 1, gpe_readb, &gpe);
+ register_ioport_write(PROC_BASE, 32, 1, gpe_writeb, &gpe);
+ register_ioport_read(PROC_BASE, 32, 1, gpe_readb, &gpe);
+
register_ioport_write(PCI_BASE, 8, 4, pcihotplug_write, &pci0_status);
register_ioport_read(PCI_BASE, 8, 4, pcihotplug_read, &pci0_status);
register_ioport_write(PCI_EJ_BASE, 4, 4, pciej_write, bus);
register_ioport_read(PCI_EJ_BASE, 4, 4, pciej_read, bus);
+ model = cpu_model;
+
pci_bus_hotplug(bus, piix4_device_hotplug);
}
+#if defined(TARGET_I386)
+static void enable_processor(struct gpe_regs *g, int cpu)
+{
+ g->sts |= 4;
+ g->cpus_sts[cpu/8] |= (1 << (cpu%8));
+}
+
+static void disable_processor(struct gpe_regs *g, int cpu)
+{
+ g->sts |= 4;
+ g->cpus_sts[cpu/8] &= ~(1 << (cpu%8));
+}
+
+void qemu_system_cpu_hot_add(int cpu, int state)
+{
+ CPUState *env;
+
+ if (state && !qemu_get_cpu(cpu)) {
+ env = pc_new_cpu(model);
+ if (!env) {
+ fprintf(stderr, "cpu %d creation failed\n", cpu);
+ return;
+ }
+ env->cpuid_apic_id = cpu;
+ }
+
+ if (state)
+ enable_processor(&gpe, cpu);
+ else
+ disable_processor(&gpe, cpu);
+ if (gpe.en & 4) {
+ qemu_set_irq(pm_state->irq, 1);
+ qemu_set_irq(pm_state->irq, 0);
+ }
+}
+#endif
+
static void enable_device(struct pci_status *p, struct gpe_regs *g, int slot)
{
g->sts |= 2;
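
The hw/acpi.c hunks above track CPU presence in a 32-byte bitmap (one bit per possible CPU) that the guest reads at PROC_BASE, and raise GPE status bit 2 (value 4) on hot-plug events. A standalone illustration of the bitmap arithmetic, assuming nothing beyond what the hunks show:

/* illustration only, not part of the patch */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t cpus_sts[32] = { 0 };
    int i = 0, cpus = 10;                   /* stand-in for smp_cpus = 10 */

    /* same loop as piix4_acpi_system_hot_add_init() */
    while (cpus > 0) {
        cpus_sts[i++] = (cpus < 8) ? (1 << cpus) - 1 : 0xff;
        cpus -= 8;
    }
    assert(cpus_sts[0] == 0xff && cpus_sts[1] == 0x03);

    /* enable_processor(&gpe, 12) marks CPU 12 present: byte 1, bit 4 */
    cpus_sts[12 / 8] |= 1 << (12 % 8);
    assert(cpus_sts[1] == 0x13);
    return 0;
}
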
diff --git a/hw/apic.c b/hw/apic.c
index 87e7dc0ba..ae805dc15 100644
--- a/hw/apic.c
+++ b/hw/apic.c
@@ -24,6 +24,8 @@
#include "host-utils.h"
#include "kvm.h"
+#include "qemu-kvm.h"
+
//#define DEBUG_APIC
/* APIC Local Vector Table */
@@ -299,8 +301,11 @@ void cpu_set_apic_base(CPUState *env, uint64_t val)
#endif
if (!s)
return;
- s->apicbase = (val & 0xfffff000) |
- (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
+ if (kvm_enabled() && kvm_irqchip_in_kernel())
+ s->apicbase = val;
+ else
+ s->apicbase = (val & 0xfffff000) |
+ (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
/* if disabled, cannot be enabled again */
if (!(val & MSR_IA32_APICBASE_ENABLE)) {
s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
@@ -393,6 +398,11 @@ int apic_get_irq_delivered(void)
return apic_irq_delivered;
}
+void apic_set_irq_delivered(void)
+{
+ apic_irq_delivered = 1;
+}
+
static void apic_set_irq(APICState *s, int vector_num, int trigger_mode)
{
apic_irq_delivered += !get_bit(s->irr, vector_num);
@@ -478,6 +488,7 @@ void apic_init_reset(CPUState *env)
if (!s)
return;
+ cpu_synchronize_state(env);
s->tpr = 0;
s->spurious_vec = 0xff;
s->log_dest = 0;
@@ -497,6 +508,13 @@ void apic_init_reset(CPUState *env)
s->wait_for_sipi = 1;
env->halted = !(s->apicbase & MSR_IA32_APICBASE_BSP);
+#ifdef KVM_CAP_MP_STATE
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ env->mp_state
+ = env->halted ? KVM_MP_STATE_UNINITIALIZED : KVM_MP_STATE_RUNNABLE;
+ kvm_load_mpstate(env);
+ }
+#endif
}
static void apic_startup(APICState *s, int vector_num)
@@ -864,6 +882,115 @@ static void apic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
}
}
+#ifdef KVM_CAP_IRQCHIP
+
+static inline uint32_t kapic_reg(struct kvm_lapic_state *kapic, int reg_id)
+{
+ return *((uint32_t *) (kapic->regs + (reg_id << 4)));
+}
+
+static inline void kapic_set_reg(struct kvm_lapic_state *kapic,
+ int reg_id, uint32_t val)
+{
+ *((uint32_t *) (kapic->regs + (reg_id << 4))) = val;
+}
+
+static void kvm_kernel_lapic_save_to_user(APICState *s)
+{
+ struct kvm_lapic_state apic;
+ struct kvm_lapic_state *kapic = &apic;
+ int i, v;
+
+ kvm_get_lapic(s->cpu_env, kapic);
+
+ s->id = kapic_reg(kapic, 0x2) >> 24;
+ s->tpr = kapic_reg(kapic, 0x8);
+ s->arb_id = kapic_reg(kapic, 0x9);
+ s->log_dest = kapic_reg(kapic, 0xd) >> 24;
+ s->dest_mode = kapic_reg(kapic, 0xe) >> 28;
+ s->spurious_vec = kapic_reg(kapic, 0xf);
+ for (i = 0; i < 8; i++) {
+ s->isr[i] = kapic_reg(kapic, 0x10 + i);
+ s->tmr[i] = kapic_reg(kapic, 0x18 + i);
+ s->irr[i] = kapic_reg(kapic, 0x20 + i);
+ }
+ s->esr = kapic_reg(kapic, 0x28);
+ s->icr[0] = kapic_reg(kapic, 0x30);
+ s->icr[1] = kapic_reg(kapic, 0x31);
+ for (i = 0; i < APIC_LVT_NB; i++)
+ s->lvt[i] = kapic_reg(kapic, 0x32 + i);
+ s->initial_count = kapic_reg(kapic, 0x38);
+ s->divide_conf = kapic_reg(kapic, 0x3e);
+
+ v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
+ s->count_shift = (v + 1) & 7;
+
+ s->initial_count_load_time = qemu_get_clock(vm_clock);
+ apic_timer_update(s, s->initial_count_load_time);
+}
+
+static void kvm_kernel_lapic_load_from_user(APICState *s)
+{
+ struct kvm_lapic_state apic;
+ struct kvm_lapic_state *klapic = &apic;
+ int i;
+
+ memset(klapic, 0, sizeof apic);
+ kapic_set_reg(klapic, 0x2, s->id << 24);
+ kapic_set_reg(klapic, 0x8, s->tpr);
+ kapic_set_reg(klapic, 0xd, s->log_dest << 24);
+ kapic_set_reg(klapic, 0xe, s->dest_mode << 28 | 0x0fffffff);
+ kapic_set_reg(klapic, 0xf, s->spurious_vec);
+ for (i = 0; i < 8; i++) {
+ kapic_set_reg(klapic, 0x10 + i, s->isr[i]);
+ kapic_set_reg(klapic, 0x18 + i, s->tmr[i]);
+ kapic_set_reg(klapic, 0x20 + i, s->irr[i]);
+ }
+ kapic_set_reg(klapic, 0x28, s->esr);
+ kapic_set_reg(klapic, 0x30, s->icr[0]);
+ kapic_set_reg(klapic, 0x31, s->icr[1]);
+ for (i = 0; i < APIC_LVT_NB; i++)
+ kapic_set_reg(klapic, 0x32 + i, s->lvt[i]);
+ kapic_set_reg(klapic, 0x38, s->initial_count);
+ kapic_set_reg(klapic, 0x3e, s->divide_conf);
+
+ kvm_set_lapic(s->cpu_env, klapic);
+}
+
+#endif
+
+void qemu_kvm_load_lapic(CPUState *env)
+{
+#ifdef KVM_CAP_IRQCHIP
+ if (kvm_enabled() && kvm_vcpu_inited(env) && kvm_irqchip_in_kernel()) {
+ kvm_kernel_lapic_load_from_user(env->apic_state);
+ }
+#endif
+}
+
+static void apic_pre_save(void *opaque)
+{
+#ifdef KVM_CAP_IRQCHIP
+ APICState *s = (void *)opaque;
+
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_lapic_save_to_user(s);
+ }
+#endif
+}
+
+static int apic_post_load(void *opaque, int version_id)
+{
+#ifdef KVM_CAP_IRQCHIP
+ APICState *s = opaque;
+
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_lapic_load_from_user(s);
+ }
+#endif
+ return 0;
+}
+
/* This function is only used for old state version 1 and 2 */
static int apic_load_old(QEMUFile *f, void *opaque, int version_id)
{
@@ -900,6 +1027,9 @@ static int apic_load_old(QEMUFile *f, void *opaque, int version_id)
if (version_id >= 2)
qemu_get_timer(f, s->timer);
+
+ qemu_kvm_load_lapic(s->cpu_env);
+
return 0;
}
@@ -930,7 +1060,9 @@ static const VMStateDescription vmstate_apic = {
VMSTATE_INT64(next_time, APICState),
VMSTATE_TIMER(timer, APICState),
VMSTATE_END_OF_LIST()
- }
+ },
+ .pre_save = apic_pre_save,
+ .post_load = apic_post_load,
};
static void apic_reset(void *opaque)
@@ -955,6 +1087,7 @@ static void apic_reset(void *opaque)
*/
s->lvt[APIC_LVT_LINT0] = 0x700;
}
+ qemu_kvm_load_lapic(s->cpu_env);
}
static CPUReadMemoryFunc * const apic_mem_read[3] = {
@@ -998,6 +1131,11 @@ int apic_init(CPUState *env)
vmstate_register(s->idx, &vmstate_apic, s);
qemu_register_reset(apic_reset, s);
+ /* In qemu-kvm, apic_reset must be called before the vcpu threads are
+ * initialized and load their registers.
+ */
+ apic_reset(s);
+
local_apics[s->idx] = s;
return 0;
}
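
The kapic_reg()/kapic_set_reg() helpers above treat kvm_lapic_state.regs as a copy of the local APIC register page, so register id N sits at byte offset N * 16. A standalone illustration of that indexing (not part of the patch; the 1 KiB buffer only stands in for KVM's register block):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t regs[1024];                     /* stands in for kvm_lapic_state.regs */
    uint32_t tpr = 0x20, readback;

    memset(regs, 0, sizeof(regs));
    /* equivalent of kapic_set_reg(kapic, 0x8, tpr): the TPR (id 0x8) is at 0x80 */
    memcpy(regs + (0x8 << 4), &tpr, sizeof(tpr));

    memcpy(&readback, regs + 0x80, sizeof(readback));
    assert(readback == 0x20);
    return 0;
}
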
diff --git a/hw/cirrus_vga.c b/hw/cirrus_vga.c
index 9f61a01d4..571044f64 100644
--- a/hw/cirrus_vga.c
+++ b/hw/cirrus_vga.c
@@ -32,6 +32,7 @@
#include "console.h"
#include "vga_int.h"
#include "kvm.h"
+#include "qemu-kvm.h"
#include "loader.h"
/*
@@ -2552,6 +2553,7 @@ static CPUWriteMemoryFunc * const cirrus_linear_bitblt_write[3] = {
static void map_linear_vram(CirrusVGAState *s)
{
+ vga_dirty_log_stop(&s->vga);
if (!s->vga.map_addr && s->vga.lfb_addr && s->vga.lfb_end) {
s->vga.map_addr = s->vga.lfb_addr;
s->vga.map_end = s->vga.lfb_end;
@@ -2561,13 +2563,19 @@ static void map_linear_vram(CirrusVGAState *s)
if (!s->vga.map_addr)
return;
+#ifndef TARGET_IA64
s->vga.lfb_vram_mapped = 0;
+ cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x8000,
+ (s->vga.vram_offset + s->cirrus_bank_base[0]) | IO_MEM_UNASSIGNED);
+ cpu_register_physical_memory(isa_mem_base + 0xa8000, 0x8000,
+ (s->vga.vram_offset + s->cirrus_bank_base[1]) | IO_MEM_UNASSIGNED);
if (!(s->cirrus_srcptr != s->cirrus_srcptr_end)
&& !((s->vga.sr[0x07] & 0x01) == 0)
&& !((s->vga.gr[0x0B] & 0x14) == 0x14)
&& !(s->vga.gr[0x0B] & 0x02)) {
+ vga_dirty_log_stop(&s->vga);
cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x8000,
(s->vga.vram_offset + s->cirrus_bank_base[0]) | IO_MEM_RAM);
cpu_register_physical_memory(isa_mem_base + 0xa8000, 0x8000,
@@ -2579,12 +2587,14 @@ static void map_linear_vram(CirrusVGAState *s)
cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x20000,
s->vga.vga_io_memory);
}
+#endif
vga_dirty_log_start(&s->vga);
}
static void unmap_linear_vram(CirrusVGAState *s)
{
+ vga_dirty_log_stop(&s->vga);
if (s->vga.map_addr && s->vga.lfb_addr && s->vga.lfb_end) {
s->vga.map_addr = s->vga.map_end = 0;
cpu_register_physical_memory(s->vga.lfb_addr, s->vga.vram_size,
@@ -2592,6 +2602,8 @@ static void unmap_linear_vram(CirrusVGAState *s)
}
cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x20000,
s->vga.vga_io_memory);
+
+ vga_dirty_log_start(&s->vga);
}
/* Compute the memory access functions */
@@ -3145,6 +3157,8 @@ static void cirrus_pci_lfb_map(PCIDevice *d, int region_num,
{
CirrusVGAState *s = &DO_UPCAST(PCICirrusVGAState, dev, d)->cirrus_vga;
+ vga_dirty_log_stop(&s->vga);
+
/* XXX: add byte swapping apertures */
cpu_register_physical_memory(addr, s->vga.vram_size,
s->cirrus_linear_io_addr);
@@ -3176,10 +3190,14 @@ static void pci_cirrus_write_config(PCIDevice *d,
PCICirrusVGAState *pvs = DO_UPCAST(PCICirrusVGAState, dev, d);
CirrusVGAState *s = &pvs->cirrus_vga;
+ vga_dirty_log_stop(&s->vga);
+
pci_default_write_config(d, address, val, len);
if (s->vga.map_addr && d->io_regions[0].addr == PCI_BAR_UNMAPPED)
s->vga.map_addr = 0;
cirrus_update_memory_access(s);
+
+ vga_dirty_log_start(&s->vga);
}
static int pci_cirrus_vga_initfn(PCIDevice *dev)
diff --git a/hw/device-assignment.c b/hw/device-assignment.c
new file mode 100644
index 000000000..801950eaf
--- /dev/null
+++ b/hw/device-assignment.c
@@ -0,0 +1,1378 @@
+/*
+ * Copyright (c) 2007, Neocleus Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *
+ * Assign a PCI device from the host to a guest VM.
+ *
+ * Adapted for KVM by Qumranet.
+ *
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/io.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "qemu-kvm.h"
+#include "hw.h"
+#include "pc.h"
+#include "sysemu.h"
+#include "console.h"
+#include "device-assignment.h"
+#include "loader.h"
+#include <pci/pci.h>
+
+/* From linux/ioport.h */
+#define IORESOURCE_IO 0x00000100 /* Resource type */
+#define IORESOURCE_MEM 0x00000200
+#define IORESOURCE_IRQ 0x00000400
+#define IORESOURCE_DMA 0x00000800
+#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
+
+/* #define DEVICE_ASSIGNMENT_DEBUG 1 */
+
+#ifdef DEVICE_ASSIGNMENT_DEBUG
+#define DEBUG(fmt, ...) \
+ do { \
+ fprintf(stderr, "%s: " fmt, __func__ , __VA_ARGS__); \
+ } while (0)
+#else
+#define DEBUG(fmt, ...) do { } while(0)
+#endif
+
+static void assigned_dev_load_option_rom(AssignedDevice *dev);
+
+static uint32_t guest_to_host_ioport(AssignedDevRegion *region, uint32_t addr)
+{
+ return region->u.r_baseport + (addr - region->e_physbase);
+}
+
+static void assigned_dev_ioport_writeb(void *opaque, uint32_t addr,
+ uint32_t value)
+{
+ AssignedDevRegion *r_access = opaque;
+ uint32_t r_pio = guest_to_host_ioport(r_access, addr);
+
+ DEBUG("r_pio=%08x e_physbase=%08x r_baseport=%08lx value=%08x\n",
+ r_pio, (int)r_access->e_physbase,
+ (unsigned long)r_access->u.r_baseport, value);
+
+ outb(value, r_pio);
+}
+
+static void assigned_dev_ioport_writew(void *opaque, uint32_t addr,
+ uint32_t value)
+{
+ AssignedDevRegion *r_access = opaque;
+ uint32_t r_pio = guest_to_host_ioport(r_access, addr);
+
+ DEBUG("r_pio=%08x e_physbase=%08x r_baseport=%08lx value=%08x\n",
+ r_pio, (int)r_access->e_physbase,
+ (unsigned long)r_access->u.r_baseport, value);
+
+ outw(value, r_pio);
+}
+
+static void assigned_dev_ioport_writel(void *opaque, uint32_t addr,
+ uint32_t value)
+{
+ AssignedDevRegion *r_access = opaque;
+ uint32_t r_pio = guest_to_host_ioport(r_access, addr);
+
+ DEBUG("r_pio=%08x e_physbase=%08x r_baseport=%08lx value=%08x\n",
+ r_pio, (int)r_access->e_physbase,
+ (unsigned long)r_access->u.r_baseport, value);
+
+ outl(value, r_pio);
+}
+
+static uint32_t assigned_dev_ioport_readb(void *opaque, uint32_t addr)
+{
+ AssignedDevRegion *r_access = opaque;
+ uint32_t r_pio = guest_to_host_ioport(r_access, addr);
+ uint32_t value;
+
+ value = inb(r_pio);
+
+ DEBUG("r_pio=%08x e_physbase=%08x r_=%08lx value=%08x\n",
+ r_pio, (int)r_access->e_physbase,
+ (unsigned long)r_access->u.r_baseport, value);
+
+ return value;
+}
+
+static uint32_t assigned_dev_ioport_readw(void *opaque, uint32_t addr)
+{
+ AssignedDevRegion *r_access = opaque;
+ uint32_t r_pio = guest_to_host_ioport(r_access, addr);
+ uint32_t value;
+
+ value = inw(r_pio);
+
+ DEBUG("r_pio=%08x e_physbase=%08x r_baseport=%08lx value=%08x\n",
+ r_pio, (int)r_access->e_physbase,
+ (unsigned long)r_access->u.r_baseport, value);
+
+ return value;
+}
+
+static uint32_t assigned_dev_ioport_readl(void *opaque, uint32_t addr)
+{
+ AssignedDevRegion *r_access = opaque;
+ uint32_t r_pio = guest_to_host_ioport(r_access, addr);
+ uint32_t value;
+
+ value = inl(r_pio);
+
+ DEBUG("r_pio=%08x e_physbase=%08x r_baseport=%08lx value=%08x\n",
+ r_pio, (int)r_access->e_physbase,
+ (unsigned long)r_access->u.r_baseport, value);
+
+ return value;
+}
+
+static void assigned_dev_iomem_map(PCIDevice *pci_dev, int region_num,
+ pcibus_t e_phys, pcibus_t e_size, int type)
+{
+ AssignedDevice *r_dev = container_of(pci_dev, AssignedDevice, dev);
+ AssignedDevRegion *region = &r_dev->v_addrs[region_num];
+ PCIRegion *real_region = &r_dev->real_device.regions[region_num];
+ pcibus_t old_ephys = region->e_physbase;
+ pcibus_t old_esize = region->e_size;
+ int first_map = (region->e_size == 0);
+ int ret = 0;
+
+ DEBUG("e_phys=%08x r_virt=%p type=%d len=%08x region_num=%d \n",
+ e_phys, region->u.r_virtbase, type, e_size, region_num);
+
+ region->e_physbase = e_phys;
+ region->e_size = e_size;
+
+ if (!first_map)
+ kvm_destroy_phys_mem(kvm_context, old_ephys,
+ TARGET_PAGE_ALIGN(old_esize));
+
+ if (e_size > 0) {
+ /* deal with MSI-X MMIO page */
+ if (real_region->base_addr <= r_dev->msix_table_addr &&
+ real_region->base_addr + real_region->size >=
+ r_dev->msix_table_addr) {
+ int offset = r_dev->msix_table_addr - real_region->base_addr;
+ ret = munmap(region->u.r_virtbase + offset, TARGET_PAGE_SIZE);
+ if (ret == 0)
+ DEBUG("munmap done, virt_base 0x%p\n",
+ region->u.r_virtbase + offset);
+ else {
+ fprintf(stderr, "%s: fail munmap msix table!\n", __func__);
+ exit(1);
+ }
+ cpu_register_physical_memory(e_phys + offset,
+ TARGET_PAGE_SIZE, r_dev->mmio_index);
+ }
+ ret = kvm_register_phys_mem(kvm_context, e_phys,
+ region->u.r_virtbase,
+ TARGET_PAGE_ALIGN(e_size), 0);
+ }
+
+ if (ret != 0) {
+ fprintf(stderr, "%s: Error: create new mapping failed\n", __func__);
+ exit(1);
+ }
+}
+
+static void assigned_dev_ioport_map(PCIDevice *pci_dev, int region_num,
+ pcibus_t addr, pcibus_t size, int type)
+{
+ AssignedDevice *r_dev = container_of(pci_dev, AssignedDevice, dev);
+ AssignedDevRegion *region = &r_dev->v_addrs[region_num];
+ int first_map = (region->e_size == 0);
+ CPUState *env;
+
+ region->e_physbase = addr;
+ region->e_size = size;
+
+ DEBUG("e_phys=0x%x r_baseport=%x type=0x%x len=%d region_num=%d \n",
+ addr, region->u.r_baseport, type, size, region_num);
+
+ if (first_map) {
+ struct ioperm_data *data;
+
+ data = qemu_mallocz(sizeof(struct ioperm_data));
+ if (data == NULL) {
+ fprintf(stderr, "%s: Out of memory\n", __func__);
+ exit(1);
+ }
+
+ data->start_port = region->u.r_baseport;
+ data->num = region->r_size;
+ data->turn_on = 1;
+
+ kvm_add_ioperm_data(data);
+
+ for (env = first_cpu; env; env = env->next_cpu)
+ kvm_ioperm(env, data);
+ }
+
+ register_ioport_read(addr, size, 1, assigned_dev_ioport_readb,
+ (r_dev->v_addrs + region_num));
+ register_ioport_read(addr, size, 2, assigned_dev_ioport_readw,
+ (r_dev->v_addrs + region_num));
+ register_ioport_read(addr, size, 4, assigned_dev_ioport_readl,
+ (r_dev->v_addrs + region_num));
+ register_ioport_write(addr, size, 1, assigned_dev_ioport_writeb,
+ (r_dev->v_addrs + region_num));
+ register_ioport_write(addr, size, 2, assigned_dev_ioport_writew,
+ (r_dev->v_addrs + region_num));
+ register_ioport_write(addr, size, 4, assigned_dev_ioport_writel,
+ (r_dev->v_addrs + region_num));
+}
+
+static uint8_t pci_find_cap_offset(struct pci_dev *pci_dev, uint8_t cap)
+{
+ int id;
+ int max_cap = 48;
+ int pos = PCI_CAPABILITY_LIST;
+ int status;
+
+ status = pci_read_byte(pci_dev, PCI_STATUS);
+ if ((status & PCI_STATUS_CAP_LIST) == 0)
+ return 0;
+
+ while (max_cap--) {
+ pos = pci_read_byte(pci_dev, pos);
+ if (pos < 0x40)
+ break;
+
+ pos &= ~3;
+ id = pci_read_byte(pci_dev, pos + PCI_CAP_LIST_ID);
+
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+
+ pos += PCI_CAP_LIST_NEXT;
+ }
+ return 0;
+}
+
+static void assigned_dev_pci_write_config(PCIDevice *d, uint32_t address,
+ uint32_t val, int len)
+{
+ int fd;
+ ssize_t ret;
+ AssignedDevice *pci_dev = container_of(d, AssignedDevice, dev);
+
+ DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
+ ((d->devfn >> 3) & 0x1F), (d->devfn & 0x7),
+ (uint16_t) address, val, len);
+
+ if (address == 0x4) {
+ pci_default_write_config(d, address, val, len);
+ /* Continue to program the card */
+ }
+
+ if ((address >= 0x10 && address <= 0x24) || address == 0x30 ||
+ address == 0x34 || address == 0x3c || address == 0x3d ||
+ pci_access_cap_config(d, address, len)) {
+ /* used for update-mappings (BAR emulation) */
+ pci_default_write_config(d, address, val, len);
+ return;
+ }
+
+ DEBUG("NON BAR (%x.%x): address=%04x val=0x%08x len=%d\n",
+ ((d->devfn >> 3) & 0x1F), (d->devfn & 0x7),
+ (uint16_t) address, val, len);
+
+ fd = pci_dev->real_device.config_fd;
+
+again:
+ ret = pwrite(fd, &val, len, address);
+ if (ret != len) {
+ if ((ret < 0) && (errno == EINTR || errno == EAGAIN))
+ goto again;
+
+ fprintf(stderr, "%s: pwrite failed, ret = %zd errno = %d\n",
+ __func__, ret, errno);
+
+ exit(1);
+ }
+}
+
+static uint32_t assigned_dev_pci_read_config(PCIDevice *d, uint32_t address,
+ int len)
+{
+ uint32_t val = 0;
+ int fd;
+ ssize_t ret;
+ AssignedDevice *pci_dev = container_of(d, AssignedDevice, dev);
+
+ if (address < 0x4 || (pci_dev->need_emulate_cmd && address == 0x4) ||
+ (address >= 0x10 && address <= 0x24) || address == 0x30 ||
+ address == 0x34 || address == 0x3c || address == 0x3d ||
+ pci_access_cap_config(d, address, len)) {
+ val = pci_default_read_config(d, address, len);
+ DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
+ (d->devfn >> 3) & 0x1F, (d->devfn & 0x7), address, val, len);
+ return val;
+ }
+
+ /* vga specific, remove later */
+ if (address == 0xFC)
+ goto do_log;
+
+ fd = pci_dev->real_device.config_fd;
+
+again:
+ ret = pread(fd, &val, len, address);
+ if (ret != len) {
+ if ((ret < 0) && (errno == EINTR || errno == EAGAIN))
+ goto again;
+
+ fprintf(stderr, "%s: pread failed, ret = %zd errno = %d\n",
+ __func__, ret, errno);
+
+ exit(1);
+ }
+
+do_log:
+ DEBUG("(%x.%x): address=%04x val=0x%08x len=%d\n",
+ (d->devfn >> 3) & 0x1F, (d->devfn & 0x7), address, val, len);
+
+ if (!pci_dev->cap.available) {
+ /* kill the special capabilities */
+ if (address == 4 && len == 4)
+ val &= ~0x100000;
+ else if (address == 6)
+ val &= ~0x10;
+ }
+
+ return val;
+}
+
+static int assigned_dev_register_regions(PCIRegion *io_regions,
+ unsigned long regions_num,
+ AssignedDevice *pci_dev)
+{
+ uint32_t i;
+ PCIRegion *cur_region = io_regions;
+
+ for (i = 0; i < regions_num; i++, cur_region++) {
+ if (!cur_region->valid)
+ continue;
+ pci_dev->v_addrs[i].num = i;
+
+ /* handle memory io regions */
+ if (cur_region->type & IORESOURCE_MEM) {
+ int t = cur_region->type & IORESOURCE_PREFETCH
+ ? PCI_BASE_ADDRESS_MEM_PREFETCH
+ : PCI_BASE_ADDRESS_SPACE_MEMORY;
+ if (cur_region->size & 0xFFF) {
+ fprintf(stderr, "Unable to assign device: PCI region %d "
+ "at address 0x%llx has size 0x%x, "
+ " which is not a multiple of 4K\n",
+ i, (unsigned long long)cur_region->base_addr,
+ cur_region->size);
+ return -1;
+ }
+
+ /* map physical memory */
+ pci_dev->v_addrs[i].e_physbase = cur_region->base_addr;
+ if (i == PCI_ROM_SLOT) {
+ pci_dev->v_addrs[i].u.r_virtbase =
+ mmap(NULL,
+ (cur_region->size + 0xFFF) & 0xFFFFF000,
+ PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE,
+ 0, (off_t) 0);
+
+ } else {
+ pci_dev->v_addrs[i].u.r_virtbase =
+ mmap(NULL,
+ (cur_region->size + 0xFFF) & 0xFFFFF000,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ cur_region->resource_fd, (off_t) 0);
+ }
+
+ if (pci_dev->v_addrs[i].u.r_virtbase == MAP_FAILED) {
+ pci_dev->v_addrs[i].u.r_virtbase = NULL;
+ fprintf(stderr, "%s: Error: Couldn't mmap 0x%x!"
+ "\n", __func__,
+ (uint32_t) (cur_region->base_addr));
+ return -1;
+ }
+
+ if (i == PCI_ROM_SLOT) {
+ memset(pci_dev->v_addrs[i].u.r_virtbase, 0,
+ (cur_region->size + 0xFFF) & 0xFFFFF000);
+ mprotect(pci_dev->v_addrs[PCI_ROM_SLOT].u.r_virtbase,
+ (cur_region->size + 0xFFF) & 0xFFFFF000, PROT_READ);
+ }
+
+ pci_dev->v_addrs[i].r_size = cur_region->size;
+ pci_dev->v_addrs[i].e_size = 0;
+
+ /* add offset */
+ pci_dev->v_addrs[i].u.r_virtbase +=
+ (cur_region->base_addr & 0xFFF);
+
+ pci_register_bar((PCIDevice *) pci_dev, i,
+ cur_region->size, t,
+ assigned_dev_iomem_map);
+ continue;
+ }
+ /* handle port io regions */
+ pci_dev->v_addrs[i].e_physbase = cur_region->base_addr;
+ pci_dev->v_addrs[i].u.r_baseport = cur_region->base_addr;
+ pci_dev->v_addrs[i].r_size = cur_region->size;
+ pci_dev->v_addrs[i].e_size = 0;
+
+ pci_register_bar((PCIDevice *) pci_dev, i,
+ cur_region->size, PCI_BASE_ADDRESS_SPACE_IO,
+ assigned_dev_ioport_map);
+
+ /* not relevant for port io */
+ pci_dev->v_addrs[i].memory_index = 0;
+ }
+
+ /* success */
+ return 0;
+}
+
+static int get_real_device(AssignedDevice *pci_dev, uint8_t r_bus,
+ uint8_t r_dev, uint8_t r_func)
+{
+ char dir[128], name[128];
+ int fd, r = 0;
+ FILE *f;
+ unsigned long long start, end, size, flags;
+ unsigned long id;
+ struct stat statbuf;
+ PCIRegion *rp;
+ PCIDevRegions *dev = &pci_dev->real_device;
+
+ dev->region_number = 0;
+
+ snprintf(dir, sizeof(dir), "/sys/bus/pci/devices/0000:%02x:%02x.%x/",
+ r_bus, r_dev, r_func);
+
+ snprintf(name, sizeof(name), "%sconfig", dir);
+
+ fd = open(name, O_RDWR);
+ if (fd == -1) {
+ fprintf(stderr, "%s: %s: %m\n", __func__, name);
+ return 1;
+ }
+ dev->config_fd = fd;
+again:
+ r = read(fd, pci_dev->dev.config, pci_config_size(&pci_dev->dev));
+ if (r < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ goto again;
+ fprintf(stderr, "%s: read failed, errno = %d\n", __func__, errno);
+ }
+
+ snprintf(name, sizeof(name), "%sresource", dir);
+
+ f = fopen(name, "r");
+ if (f == NULL) {
+ fprintf(stderr, "%s: %s: %m\n", __func__, name);
+ return 1;
+ }
+
+ for (r = 0; r < PCI_NUM_REGIONS; r++) {
+ if (fscanf(f, "%lli %lli %lli\n", &start, &end, &flags) != 3)
+ break;
+
+ rp = dev->regions + r;
+ rp->valid = 0;
+ size = end - start + 1;
+ flags &= IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (size == 0 || (flags & ~IORESOURCE_PREFETCH) == 0)
+ continue;
+ if (flags & IORESOURCE_MEM) {
+ flags &= ~IORESOURCE_IO;
+ if (r != PCI_ROM_SLOT) {
+ snprintf(name, sizeof(name), "%sresource%d", dir, r);
+ fd = open(name, O_RDWR);
+ if (fd == -1)
+ continue;
+ rp->resource_fd = fd;
+ }
+ } else
+ flags &= ~IORESOURCE_PREFETCH;
+
+ rp->type = flags;
+ rp->valid = 1;
+ rp->base_addr = start;
+ rp->size = size;
+ DEBUG("region %d size %d start 0x%llx type %d resource_fd %d\n",
+ r, rp->size, start, rp->type, rp->resource_fd);
+ }
+
+ fclose(f);
+
+ /* read and fill vendor ID */
+ snprintf(name, sizeof(name), "%svendor", dir);
+ f = fopen(name, "r");
+ if (f == NULL) {
+ fprintf(stderr, "%s: %s: %m\n", __func__, name);
+ return 1;
+ }
+ if (fscanf(f, "%li\n", &id) == 1) {
+ pci_dev->dev.config[0] = id & 0xff;
+ pci_dev->dev.config[1] = (id & 0xff00) >> 8;
+ }
+ fclose(f);
+
+ /* read and fill device ID */
+ snprintf(name, sizeof(name), "%sdevice", dir);
+ f = fopen(name, "r");
+ if (f == NULL) {
+ fprintf(stderr, "%s: %s: %m\n", __func__, name);
+ return 1;
+ }
+ if (fscanf(f, "%li\n", &id) == 1) {
+ pci_dev->dev.config[2] = id & 0xff;
+ pci_dev->dev.config[3] = (id & 0xff00) >> 8;
+ }
+ fclose(f);
+
+ /* a physfn link means this is an SR-IOV virtual function device */
+ snprintf(name, sizeof(name), "%sphysfn/", dir);
+ if (!stat(name, &statbuf))
+ pci_dev->need_emulate_cmd = 1;
+ else
+ pci_dev->need_emulate_cmd = 0;
+
+ dev->region_number = r;
+ return 0;
+}
+
+static QLIST_HEAD(, AssignedDevice) devs = QLIST_HEAD_INITIALIZER(devs);
+
+#ifdef KVM_CAP_IRQ_ROUTING
+static void free_dev_irq_entries(AssignedDevice *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->irq_entries_nr; i++)
+ kvm_del_routing_entry(kvm_context, &dev->entry[i]);
+ free(dev->entry);
+ dev->entry = NULL;
+ dev->irq_entries_nr = 0;
+}
+#endif
+
+static void free_assigned_device(AssignedDevice *dev)
+{
+ if (dev) {
+ int i;
+
+ for (i = 0; i < dev->real_device.region_number; i++) {
+ PCIRegion *pci_region = &dev->real_device.regions[i];
+ AssignedDevRegion *region = &dev->v_addrs[i];
+
+ if (!pci_region->valid)
+ continue;
+
+ if (pci_region->type & IORESOURCE_IO) {
+ kvm_remove_ioperm_data(region->u.r_baseport, region->r_size);
+ continue;
+ } else if (pci_region->type & IORESOURCE_MEM) {
+ if (region->e_size > 0)
+ kvm_destroy_phys_mem(kvm_context, region->e_physbase,
+ TARGET_PAGE_ALIGN(region->e_size));
+
+ if (region->u.r_virtbase) {
+ int ret = munmap(region->u.r_virtbase,
+ (pci_region->size + 0xFFF) & 0xFFFFF000);
+ if (ret != 0)
+ fprintf(stderr,
+ "Failed to unmap assigned device region: %s\n",
+ strerror(errno));
+ }
+ }
+ }
+
+ if (dev->real_device.config_fd) {
+ close(dev->real_device.config_fd);
+ dev->real_device.config_fd = 0;
+ }
+
+#ifdef KVM_CAP_IRQ_ROUTING
+ free_dev_irq_entries(dev);
+#endif
+ }
+}
+
+static uint32_t calc_assigned_dev_id(uint8_t bus, uint8_t devfn)
+{
+ return (uint32_t)bus << 8 | (uint32_t)devfn;
+}
+
+static int assign_device(AssignedDevice *dev)
+{
+ struct kvm_assigned_pci_dev assigned_dev_data;
+ int r;
+
+ memset(&assigned_dev_data, 0, sizeof(assigned_dev_data));
+ assigned_dev_data.assigned_dev_id =
+ calc_assigned_dev_id(dev->h_busnr, dev->h_devfn);
+ assigned_dev_data.busnr = dev->h_busnr;
+ assigned_dev_data.devfn = dev->h_devfn;
+
+#ifdef KVM_CAP_IOMMU
+ /* We always enable the IOMMU unless disabled on the command line */
+ if (dev->use_iommu) {
+ if (!kvm_check_extension(kvm_state, KVM_CAP_IOMMU)) {
+ fprintf(stderr, "No IOMMU found. Unable to assign device \"%s\"\n",
+ dev->dev.qdev.id);
+ return -ENODEV;
+ }
+ assigned_dev_data.flags |= KVM_DEV_ASSIGN_ENABLE_IOMMU;
+ }
+#else
+ dev->use_iommu = 0;
+#endif
+
+ r = kvm_assign_pci_device(kvm_context, &assigned_dev_data);
+ if (r < 0)
+ fprintf(stderr, "Failed to assign device \"%s\" : %s\n",
+ dev->dev.qdev.id, strerror(-r));
+ return r;
+}
+
+static int assign_irq(AssignedDevice *dev)
+{
+ struct kvm_assigned_irq assigned_irq_data;
+ int irq, r = 0;
+
+ /* Interrupt PIN 0 means don't use INTx */
+ if (pci_read_byte(dev->pdev, PCI_INTERRUPT_PIN) == 0)
+ return 0;
+
+ irq = pci_map_irq(&dev->dev, dev->intpin);
+ irq = piix_get_irq(irq);
+
+#ifdef TARGET_IA64
+ irq = ipf_map_irq(&dev->dev, irq);
+#endif
+
+ if (dev->girq == irq)
+ return r;
+
+ memset(&assigned_irq_data, 0, sizeof(assigned_irq_data));
+ assigned_irq_data.assigned_dev_id =
+ calc_assigned_dev_id(dev->h_busnr, dev->h_devfn);
+ assigned_irq_data.guest_irq = irq;
+ assigned_irq_data.host_irq = dev->real_device.irq;
+#ifdef KVM_CAP_ASSIGN_DEV_IRQ
+ if (dev->irq_requested_type) {
+ assigned_irq_data.flags = dev->irq_requested_type;
+ r = kvm_deassign_irq(kvm_context, &assigned_irq_data);
+ /* -ENXIO means no assigned irq */
+ if (r && r != -ENXIO)
+ perror("assign_irq: deassign");
+ }
+
+ assigned_irq_data.flags = KVM_DEV_IRQ_GUEST_INTX;
+ if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSI)
+ assigned_irq_data.flags |= KVM_DEV_IRQ_HOST_MSI;
+ else
+ assigned_irq_data.flags |= KVM_DEV_IRQ_HOST_INTX;
+#endif
+
+ r = kvm_assign_irq(kvm_context, &assigned_irq_data);
+ if (r < 0) {
+ fprintf(stderr, "Failed to assign irq for \"%s\": %s\n",
+ dev->dev.qdev.id, strerror(-r));
+ fprintf(stderr, "Perhaps you are assigning a device "
+ "that shares an IRQ with another device?\n");
+ return r;
+ }
+
+ dev->girq = irq;
+ dev->irq_requested_type = assigned_irq_data.flags;
+ return r;
+}
+
+static void deassign_device(AssignedDevice *dev)
+{
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+ struct kvm_assigned_pci_dev assigned_dev_data;
+ int r;
+
+ memset(&assigned_dev_data, 0, sizeof(assigned_dev_data));
+ assigned_dev_data.assigned_dev_id =
+ calc_assigned_dev_id(dev->h_busnr, dev->h_devfn);
+
+ r = kvm_deassign_pci_device(kvm_context, &assigned_dev_data);
+ if (r < 0)
+ fprintf(stderr, "Failed to deassign device \"%s\" : %s\n",
+ dev->dev.qdev.id, strerror(-r));
+#endif
+}
+
+#if 0
+AssignedDevInfo *get_assigned_device(int pcibus, int slot)
+{
+ AssignedDevice *assigned_dev = NULL;
+ AssignedDevInfo *adev = NULL;
+
+ QLIST_FOREACH(adev, &adev_head, next) {
+ assigned_dev = adev->assigned_dev;
+ if (pci_bus_num(assigned_dev->dev.bus) == pcibus &&
+ PCI_SLOT(assigned_dev->dev.devfn) == slot)
+ return adev;
+ }
+
+ return NULL;
+}
+#endif
+
+/* The PCI config space has been updated. Check whether the IRQ numbers have
+ * changed for our devices
+ */
+void assigned_dev_update_irqs(void)
+{
+ AssignedDevice *dev, *next;
+ int r;
+
+ dev = QLIST_FIRST(&devs);
+ while (dev) {
+ next = QLIST_NEXT(dev, next);
+ r = assign_irq(dev);
+ if (r < 0)
+ qdev_unplug(&dev->dev.qdev);
+ dev = next;
+ }
+}
+
+#ifdef KVM_CAP_IRQ_ROUTING
+
+#ifdef KVM_CAP_DEVICE_MSI
+static void assigned_dev_update_msi(PCIDevice *pci_dev, unsigned int ctrl_pos)
+{
+ struct kvm_assigned_irq assigned_irq_data;
+ AssignedDevice *assigned_dev = container_of(pci_dev, AssignedDevice, dev);
+ uint8_t ctrl_byte = pci_dev->config[ctrl_pos];
+ int r;
+
+ memset(&assigned_irq_data, 0, sizeof assigned_irq_data);
+ assigned_irq_data.assigned_dev_id =
+ calc_assigned_dev_id(assigned_dev->h_busnr,
+ (uint8_t)assigned_dev->h_devfn);
+
+ if (assigned_dev->irq_requested_type) {
+ assigned_irq_data.flags = assigned_dev->irq_requested_type;
+ free_dev_irq_entries(assigned_dev);
+ r = kvm_deassign_irq(kvm_context, &assigned_irq_data);
+ /* -ENXIO means no assigned irq */
+ if (r && r != -ENXIO)
+ perror("assigned_dev_update_msi: deassign irq");
+ }
+
+ if (ctrl_byte & PCI_MSI_FLAGS_ENABLE) {
+ assigned_dev->entry = calloc(1, sizeof(struct kvm_irq_routing_entry));
+ if (!assigned_dev->entry) {
+ perror("assigned_dev_update_msi: ");
+ return;
+ }
+ assigned_dev->entry->u.msi.address_lo =
+ *(uint32_t *)(pci_dev->config + pci_dev->cap.start +
+ PCI_MSI_ADDRESS_LO);
+ assigned_dev->entry->u.msi.address_hi = 0;
+ assigned_dev->entry->u.msi.data = *(uint16_t *)(pci_dev->config +
+ pci_dev->cap.start + PCI_MSI_DATA_32);
+ assigned_dev->entry->type = KVM_IRQ_ROUTING_MSI;
+ r = kvm_get_irq_route_gsi(kvm_context);
+ if (r < 0) {
+ perror("assigned_dev_update_msi: kvm_get_irq_route_gsi");
+ return;
+ }
+ assigned_dev->entry->gsi = r;
+
+ kvm_add_routing_entry(kvm_context, assigned_dev->entry);
+ if (kvm_commit_irq_routes(kvm_context) < 0) {
+ perror("assigned_dev_update_msi: kvm_commit_irq_routes");
+ assigned_dev->cap.state &= ~ASSIGNED_DEVICE_MSI_ENABLED;
+ return;
+ }
+ assigned_dev->irq_entries_nr = 1;
+
+ assigned_irq_data.guest_irq = assigned_dev->entry->gsi;
+ assigned_irq_data.flags = KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI;
+ if (kvm_assign_irq(kvm_context, &assigned_irq_data) < 0)
+ perror("assigned_dev_enable_msi: assign irq");
+
+ assigned_dev->irq_requested_type = assigned_irq_data.flags;
+ }
+}
+#endif
+
+#ifdef KVM_CAP_DEVICE_MSIX
+static int assigned_dev_update_msix_mmio(PCIDevice *pci_dev)
+{
+ AssignedDevice *adev = container_of(pci_dev, AssignedDevice, dev);
+ u16 entries_nr = 0, entries_max_nr;
+ int pos = 0, i, r = 0;
+ u32 msg_addr, msg_upper_addr, msg_data, msg_ctrl;
+ struct kvm_assigned_msix_nr msix_nr;
+ struct kvm_assigned_msix_entry msix_entry;
+ void *va = adev->msix_table_page;
+
+ if (adev->cap.available & ASSIGNED_DEVICE_CAP_MSI)
+ pos = pci_dev->cap.start + PCI_CAPABILITY_CONFIG_MSI_LENGTH;
+ else
+ pos = pci_dev->cap.start;
+
+ entries_max_nr = pci_dev->config[pos + 2];
+ entries_max_nr &= PCI_MSIX_TABSIZE;
+ entries_max_nr += 1;
+
+ /* Count the usable entries so we know how many to allocate */
+ for (i = 0; i < entries_max_nr; i++) {
+ memcpy(&msg_ctrl, va + i * 16 + 12, 4);
+ memcpy(&msg_data, va + i * 16 + 8, 4);
+ /* Ignore an unused entry even if it's unmasked */
+ if (msg_data == 0)
+ continue;
+ entries_nr ++;
+ }
+
+ if (entries_nr == 0) {
+ fprintf(stderr, "MSI-X entry number is zero!\n");
+ return -EINVAL;
+ }
+ msix_nr.assigned_dev_id = calc_assigned_dev_id(adev->h_busnr,
+ (uint8_t)adev->h_devfn);
+ msix_nr.entry_nr = entries_nr;
+ r = kvm_assign_set_msix_nr(kvm_context, &msix_nr);
+ if (r != 0) {
+ fprintf(stderr, "fail to set MSI-X entry number for MSIX! %s\n",
+ strerror(-r));
+ return r;
+ }
+
+ free_dev_irq_entries(adev);
+ adev->irq_entries_nr = entries_nr;
+ adev->entry = calloc(entries_nr, sizeof(struct kvm_irq_routing_entry));
+ if (!adev->entry) {
+ perror("assigned_dev_update_msix_mmio: ");
+ return -errno;
+ }
+
+ msix_entry.assigned_dev_id = msix_nr.assigned_dev_id;
+ entries_nr = 0;
+ for (i = 0; i < entries_max_nr; i++) {
+ if (entries_nr >= msix_nr.entry_nr)
+ break;
+ memcpy(&msg_ctrl, va + i * 16 + 12, 4);
+ memcpy(&msg_data, va + i * 16 + 8, 4);
+ if (msg_data == 0)
+ continue;
+
+ memcpy(&msg_addr, va + i * 16, 4);
+ memcpy(&msg_upper_addr, va + i * 16 + 4, 4);
+
+ r = kvm_get_irq_route_gsi(kvm_context);
+ if (r < 0)
+ return r;
+
+ adev->entry[entries_nr].gsi = r;
+ adev->entry[entries_nr].type = KVM_IRQ_ROUTING_MSI;
+ adev->entry[entries_nr].flags = 0;
+ adev->entry[entries_nr].u.msi.address_lo = msg_addr;
+ adev->entry[entries_nr].u.msi.address_hi = msg_upper_addr;
+ adev->entry[entries_nr].u.msi.data = msg_data;
+ DEBUG("MSI-X data 0x%x, MSI-X addr_lo 0x%x\n!", msg_data, msg_addr);
+ kvm_add_routing_entry(kvm_context, &adev->entry[entries_nr]);
+
+ msix_entry.gsi = adev->entry[entries_nr].gsi;
+ msix_entry.entry = i;
+ r = kvm_assign_set_msix_entry(kvm_context, &msix_entry);
+ if (r) {
+ fprintf(stderr, "fail to set MSI-X entry! %s\n", strerror(-r));
+ break;
+ }
+ DEBUG("MSI-X entry gsi 0x%x, entry %d\n!",
+ msix_entry.gsi, msix_entry.entry);
+ entries_nr ++;
+ }
+
+ if (r == 0 && kvm_commit_irq_routes(kvm_context) < 0) {
+ perror("assigned_dev_update_msix_mmio: kvm_commit_irq_routes");
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static void assigned_dev_update_msix(PCIDevice *pci_dev, unsigned int ctrl_pos)
+{
+ struct kvm_assigned_irq assigned_irq_data;
+ AssignedDevice *assigned_dev = container_of(pci_dev, AssignedDevice, dev);
+ uint16_t *ctrl_word = (uint16_t *)(pci_dev->config + ctrl_pos);
+ int r;
+
+ memset(&assigned_irq_data, 0, sizeof assigned_irq_data);
+ assigned_irq_data.assigned_dev_id =
+ calc_assigned_dev_id(assigned_dev->h_busnr,
+ (uint8_t)assigned_dev->h_devfn);
+
+ if (assigned_dev->irq_requested_type) {
+ assigned_irq_data.flags = assigned_dev->irq_requested_type;
+ free_dev_irq_entries(assigned_dev);
+ r = kvm_deassign_irq(kvm_context, &assigned_irq_data);
+ /* -ENXIO means no assigned irq */
+ if (r && r != -ENXIO)
+ perror("assigned_dev_update_msix: deassign irq");
+ }
+ assigned_irq_data.flags = KVM_DEV_IRQ_HOST_MSIX | KVM_DEV_IRQ_GUEST_MSIX;
+
+ if (*ctrl_word & PCI_MSIX_ENABLE) {
+ if (assigned_dev_update_msix_mmio(pci_dev) < 0) {
+ perror("assigned_dev_update_msix_mmio");
+ return;
+ }
+ if (kvm_assign_irq(kvm_context, &assigned_irq_data) < 0) {
+ perror("assigned_dev_enable_msix: assign irq");
+ return;
+ }
+ assigned_dev->irq_requested_type = assigned_irq_data.flags;
+ }
+}
+#endif
+#endif
+
+static void assigned_device_pci_cap_write_config(PCIDevice *pci_dev, uint32_t address,
+ uint32_t val, int len)
+{
+ AssignedDevice *assigned_dev = container_of(pci_dev, AssignedDevice, dev);
+ unsigned int pos = pci_dev->cap.start, ctrl_pos;
+
+ pci_default_cap_write_config(pci_dev, address, val, len);
+#ifdef KVM_CAP_IRQ_ROUTING
+#ifdef KVM_CAP_DEVICE_MSI
+ if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) {
+ ctrl_pos = pos + PCI_MSI_FLAGS;
+ if (address <= ctrl_pos && address + len > ctrl_pos)
+ assigned_dev_update_msi(pci_dev, ctrl_pos);
+ pos += PCI_CAPABILITY_CONFIG_MSI_LENGTH;
+ }
+#endif
+#ifdef KVM_CAP_DEVICE_MSIX
+ if (assigned_dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX) {
+ ctrl_pos = pos + 3;
+ if (address <= ctrl_pos && address + len > ctrl_pos) {
+ ctrl_pos--; /* control is word long */
+ assigned_dev_update_msix(pci_dev, ctrl_pos);
+ }
+ pos += PCI_CAPABILITY_CONFIG_MSIX_LENGTH;
+ }
+#endif
+#endif
+ return;
+}
+
+static int assigned_device_pci_cap_init(PCIDevice *pci_dev)
+{
+ AssignedDevice *dev = container_of(pci_dev, AssignedDevice, dev);
+ PCIRegion *pci_region = dev->real_device.regions;
+ int next_cap_pt = 0;
+
+ pci_dev->cap.length = 0;
+#ifdef KVM_CAP_IRQ_ROUTING
+#ifdef KVM_CAP_DEVICE_MSI
+ /* Expose MSI capability
+ * MSI capability is the 1st capability in capability config */
+ if (pci_find_cap_offset(dev->pdev, PCI_CAP_ID_MSI)) {
+ dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI;
+ memset(&pci_dev->config[pci_dev->cap.start + pci_dev->cap.length],
+ 0, PCI_CAPABILITY_CONFIG_MSI_LENGTH);
+ pci_dev->config[pci_dev->cap.start + pci_dev->cap.length] =
+ PCI_CAP_ID_MSI;
+ pci_dev->cap.length += PCI_CAPABILITY_CONFIG_MSI_LENGTH;
+ next_cap_pt = 1;
+ }
+#endif
+#ifdef KVM_CAP_DEVICE_MSIX
+ /* Expose MSI-X capability */
+ if (pci_find_cap_offset(dev->pdev, PCI_CAP_ID_MSIX)) {
+ int pos, entry_nr, bar_nr;
+ u32 msix_table_entry;
+ dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX;
+ memset(&pci_dev->config[pci_dev->cap.start + pci_dev->cap.length],
+ 0, PCI_CAPABILITY_CONFIG_MSIX_LENGTH);
+ pos = pci_find_cap_offset(dev->pdev, PCI_CAP_ID_MSIX);
+ entry_nr = pci_read_word(dev->pdev, pos + 2) & PCI_MSIX_TABSIZE;
+ pci_dev->config[pci_dev->cap.start + pci_dev->cap.length] = 0x11;
+ pci_dev->config[pci_dev->cap.start +
+ pci_dev->cap.length + 2] = entry_nr;
+ msix_table_entry = pci_read_long(dev->pdev, pos + PCI_MSIX_TABLE);
+ *(uint32_t *)(pci_dev->config + pci_dev->cap.start +
+ pci_dev->cap.length + PCI_MSIX_TABLE) = msix_table_entry;
+ *(uint32_t *)(pci_dev->config + pci_dev->cap.start +
+ pci_dev->cap.length + PCI_MSIX_PBA) =
+ pci_read_long(dev->pdev, pos + PCI_MSIX_PBA);
+ bar_nr = msix_table_entry & PCI_MSIX_BIR;
+ msix_table_entry &= ~PCI_MSIX_BIR;
+ dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
+ if (next_cap_pt != 0) {
+ pci_dev->config[pci_dev->cap.start + next_cap_pt] =
+ pci_dev->cap.start + pci_dev->cap.length;
+ next_cap_pt += PCI_CAPABILITY_CONFIG_MSI_LENGTH;
+ } else
+ next_cap_pt = 1;
+ pci_dev->cap.length += PCI_CAPABILITY_CONFIG_MSIX_LENGTH;
+ }
+#endif
+#endif
+
+ return 0;
+}
+
+static uint32_t msix_mmio_readl(void *opaque, target_phys_addr_t addr)
+{
+ AssignedDevice *adev = opaque;
+ unsigned int offset = addr & 0xfff;
+ void *page = adev->msix_table_page;
+ uint32_t val = 0;
+
+ memcpy(&val, (void *)((char *)page + offset), 4);
+
+ return val;
+}
+
+static uint32_t msix_mmio_readb(void *opaque, target_phys_addr_t addr)
+{
+ return ((msix_mmio_readl(opaque, addr & ~3)) >>
+ (8 * (addr & 3))) & 0xff;
+}
+
+static uint32_t msix_mmio_readw(void *opaque, target_phys_addr_t addr)
+{
+ return ((msix_mmio_readl(opaque, addr & ~3)) >>
+ (8 * (addr & 3))) & 0xffff;
+}
+
+static void msix_mmio_writel(void *opaque,
+ target_phys_addr_t addr, uint32_t val)
+{
+ AssignedDevice *adev = opaque;
+ unsigned int offset = addr & 0xfff;
+ void *page = adev->msix_table_page;
+
+ DEBUG("write to MSI-X entry table mmio offset 0x%lx, val 0x%lx\n",
+ addr, val);
+ memcpy((void *)((char *)page + offset), &val, 4);
+}
+
+static void msix_mmio_writew(void *opaque,
+ target_phys_addr_t addr, uint32_t val)
+{
+ msix_mmio_writel(opaque, addr & ~3,
+ (val & 0xffff) << (8*(addr & 3)));
+}
+
+static void msix_mmio_writeb(void *opaque,
+ target_phys_addr_t addr, uint32_t val)
+{
+ msix_mmio_writel(opaque, addr & ~3,
+ (val & 0xff) << (8*(addr & 3)));
+}
+
+static CPUWriteMemoryFunc *msix_mmio_write[] = {
+ msix_mmio_writeb, msix_mmio_writew, msix_mmio_writel
+};
+
+static CPUReadMemoryFunc *msix_mmio_read[] = {
+ msix_mmio_readb, msix_mmio_readw, msix_mmio_readl
+};
+
+static int assigned_dev_register_msix_mmio(AssignedDevice *dev)
+{
+ dev->msix_table_page = mmap(NULL, 0x1000,
+ PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, 0, 0);
+ if (dev->msix_table_page == MAP_FAILED) {
+ fprintf(stderr, "fail allocate msix_table_page! %s\n",
+ strerror(errno));
+ return -EFAULT;
+ }
+ memset(dev->msix_table_page, 0, 0x1000);
+ dev->mmio_index = cpu_register_io_memory(
+ msix_mmio_read, msix_mmio_write, dev);
+ return 0;
+}
+
+static int assigned_initfn(struct PCIDevice *pci_dev)
+{
+ AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
+ struct pci_access *pacc;
+ uint8_t e_device, e_intx;
+ int r;
+
+ if (!dev->host.bus && !dev->host.dev && !dev->host.func) {
+ qemu_error("pci-assign: error: no host device specified\n");
+ goto out;
+ }
+
+ if (get_real_device(dev, dev->host.bus, dev->host.dev, dev->host.func)) {
+ qemu_error("pci-assign: Error: Couldn't get real device (%s)!\n",
+ dev->dev.qdev.id);
+ goto out;
+ }
+
+ /* handle real device's MMIO/PIO BARs */
+ if (assigned_dev_register_regions(dev->real_device.regions,
+ dev->real_device.region_number,
+ dev))
+ goto out;
+
+ /* handle interrupt routing */
+ e_device = (dev->dev.devfn >> 3) & 0x1f;
+ e_intx = dev->dev.config[0x3d] - 1;
+ dev->intpin = e_intx;
+ dev->run = 0;
+ dev->girq = 0;
+ dev->h_busnr = dev->host.bus;
+ dev->h_devfn = PCI_DEVFN(dev->host.dev, dev->host.func);
+
+ pacc = pci_alloc();
+ pci_init(pacc);
+ dev->pdev = pci_get_dev(pacc, 0, dev->host.bus, dev->host.dev, dev->host.func);
+
+ if (pci_enable_capability_support(pci_dev, 0, NULL,
+ assigned_device_pci_cap_write_config,
+ assigned_device_pci_cap_init) < 0)
+ goto assigned_out;
+
+ /* assign device to guest */
+ r = assign_device(dev);
+ if (r < 0)
+ goto assigned_out;
+
+ /* assign irq for the device */
+ r = assign_irq(dev);
+ if (r < 0)
+ goto assigned_out;
+
+ /* intercept MSI-X entry page in the MMIO */
+ if (dev->cap.available & ASSIGNED_DEVICE_CAP_MSIX)
+ if (assigned_dev_register_msix_mmio(dev))
+ goto assigned_out;
+
+ assigned_dev_load_option_rom(dev);
+ QLIST_INSERT_HEAD(&devs, dev, next);
+ return 0;
+
+assigned_out:
+ deassign_device(dev);
+out:
+ free_assigned_device(dev);
+ return -1;
+}
+
+static int assigned_exitfn(struct PCIDevice *pci_dev)
+{
+ AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev);
+
+ deassign_device(dev);
+ free_assigned_device(dev);
+ return 0;
+}
+
+static int parse_hostaddr(DeviceState *dev, Property *prop, const char *str)
+{
+ PCIHostDevice *ptr = qdev_get_prop_ptr(dev, prop);
+ int rc;
+
+ rc = pci_parse_host_devaddr(str, &ptr->bus, &ptr->dev, &ptr->func);
+ if (rc != 0)
+ return -1;
+ return 0;
+}
+
+static int print_hostaddr(DeviceState *dev, Property *prop, char *dest, size_t len)
+{
+ PCIHostDevice *ptr = qdev_get_prop_ptr(dev, prop);
+
+ return snprintf(dest, len, "%02x:%02x.%x", ptr->bus, ptr->dev, ptr->func);
+}
+
+PropertyInfo qdev_prop_hostaddr = {
+ .name = "pci-hostaddr",
+ .type = -1,
+ .size = sizeof(PCIHostDevice),
+ .parse = parse_hostaddr,
+ .print = print_hostaddr,
+};
+
+static PCIDeviceInfo assign_info = {
+ .qdev.name = "pci-assign",
+ .qdev.desc = "pass through host pci devices to the guest",
+ .qdev.size = sizeof(AssignedDevice),
+ .init = assigned_initfn,
+ .exit = assigned_exitfn,
+ .config_read = assigned_dev_pci_read_config,
+ .config_write = assigned_dev_pci_write_config,
+ .qdev.props = (Property[]) {
+ DEFINE_PROP("host", AssignedDevice, host, qdev_prop_hostaddr, PCIHostDevice),
+ DEFINE_PROP_UINT32("iommu", AssignedDevice, use_iommu, 1),
+ DEFINE_PROP_END_OF_LIST(),
+ },
+};
+
+static void assign_register_devices(void)
+{
+ pci_qdev_register(&assign_info);
+}
+
+device_init(assign_register_devices)
+
+
+/*
+ * Syntax to assign device:
+ *
+ * -pcidevice host=bus:dev.func[,dma=none][,name=Foo]
+ *
+ * Example:
+ * -pcidevice host=00:13.0,dma=none
+ *
+ * dma can currently only be 'none' to disable iommu support.
+ */
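+
+/* Illustrative walk-through (hypothetical argument): "host=00:13.0,dma=none"
+ * yields QemuOpts with driver=pci-assign, host=00:13.0 and, when
+ * KVM_CAP_IOMMU is available, iommu=0; the generic qdev code then
+ * instantiates the pci-assign device from these options. */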
+QemuOpts *add_assigned_device(const char *arg)
+{
+ QemuOpts *opts = NULL;
+ char host[64], id[64], dma[8];
+ int r;
+
+ r = get_param_value(host, sizeof(host), "host", arg);
+ if (!r)
+ goto bad;
+ r = get_param_value(id, sizeof(id), "id", arg);
+ if (!r)
+ r = get_param_value(id, sizeof(id), "name", arg);
+ if (!r)
+ r = get_param_value(id, sizeof(id), "host", arg);
+
+ opts = qemu_opts_create(&qemu_device_opts, id, 0);
+ if (!opts)
+ goto bad;
+ qemu_opt_set(opts, "driver", "pci-assign");
+ qemu_opt_set(opts, "host", host);
+
+#ifdef KVM_CAP_IOMMU
+ r = get_param_value(dma, sizeof(dma), "dma", arg);
+ if (r && !strncmp(dma, "none", 4))
+ qemu_opt_set(opts, "iommu", "0");
+#endif
+ qemu_opts_print(opts, NULL);
+ return opts;
+
+bad:
+ fprintf(stderr, "pcidevice argument parse error; "
+ "please check the help text for usage\n");
+ if (opts)
+ qemu_opts_del(opts);
+ return NULL;
+}
+
+void add_assigned_devices(PCIBus *bus, const char **devices, int n_devices)
+{
+ QemuOpts *opts;
+ int i;
+
+ for (i = 0; i < n_devices; i++) {
+ opts = add_assigned_device(devices[i]);
+ if (opts == NULL) {
+ fprintf(stderr, "Could not add assigned device %s\n", devices[i]);
+ exit(1);
+ }
+ /* generic code will call qdev_device_add() for the device */
+ }
+}
+
+/*
+ * If the assigned device has an option ROM, load its contents into the space
+ * backing the emulated ROM BAR. If an error occurs while loading the option
+ * ROM, it is simply skipped and the device is left without a ROM.
+ */
+static void assigned_dev_load_option_rom(AssignedDevice *dev)
+{
+ int size, len;
+ void *buf;
+ FILE *fp;
+ uint8_t i = 1;
+ char rom_file[64];
+
+ snprintf(rom_file, sizeof(rom_file),
+ "/sys/bus/pci/devices/0000:%02x:%02x.%01x/rom",
+ dev->host.bus, dev->host.dev, dev->host.func);
+
+ if (access(rom_file, F_OK))
+ return;
+
+ /* Write something to the ROM file to enable it */
+ fp = fopen(rom_file, "wb");
+ if (fp == NULL)
+ return;
+ len = fwrite(&i, 1, 1, fp);
+ fclose(fp);
+ if (len != 1)
+ return;
+
+ /* The file has to be closed and reopened, otherwise it won't work */
+ fp = fopen(rom_file, "rb");
+ if (fp == NULL)
+ return;
+
+ fseek(fp, 0, SEEK_END);
+ size = ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+
+ buf = malloc(size);
+ if (buf == NULL) {
+ fclose(fp);
+ return;
+ }
+
+ fread(buf, size, 1, fp);
+ if (!feof(fp) || ferror(fp)) {
+ free(buf);
+ fclose(fp);
+ return;
+ }
+ fclose(fp);
+
+ /* Copy ROM contents into the space backing the ROM BAR */
+ if (dev->v_addrs[PCI_ROM_SLOT].r_size >= size &&
+ dev->v_addrs[PCI_ROM_SLOT].u.r_virtbase) {
+ mprotect(dev->v_addrs[PCI_ROM_SLOT].u.r_virtbase,
+ size, PROT_READ | PROT_WRITE);
+ memcpy(dev->v_addrs[PCI_ROM_SLOT].u.r_virtbase,
+ buf, size);
+ mprotect(dev->v_addrs[PCI_ROM_SLOT].u.r_virtbase,
+ size, PROT_READ);
+ }
+
+ free(buf);
+}
diff --git a/hw/device-assignment.h b/hw/device-assignment.h
new file mode 100644
index 000000000..a23126099
--- /dev/null
+++ b/hw/device-assignment.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2007, Neocleus Corporation.
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Data structures for storing PCI state
+ *
+ * Adapted to kvm by Qumranet
+ *
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ */
+
+#ifndef __DEVICE_ASSIGNMENT_H__
+#define __DEVICE_ASSIGNMENT_H__
+
+#include <sys/mman.h>
+#include "qemu-common.h"
+#include "qemu-queue.h"
+#include "pci.h"
+
+/* From include/linux/pci.h in the kernel sources */
+#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
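+/* Illustrative example (hypothetical device): PCI_DEVFN(0x13, 0) == 0x98,
+ * i.e. bits 7:3 of devfn hold the slot and bits 2:0 hold the function. */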
+
+typedef struct PCIHostDevice {
+ int bus;
+ int dev;
+ int func;
+} PCIHostDevice;
+
+typedef struct {
+ int type; /* Memory or port I/O */
+ int valid;
+ uint32_t base_addr;
+ uint32_t size; /* size of the region */
+ int resource_fd;
+} PCIRegion;
+
+typedef struct {
+ uint8_t bus, dev, func; /* Bus inside domain, device and function */
+ int irq; /* IRQ number */
+ uint16_t region_number; /* number of active regions */
+
+ /* Port I/O or MMIO Regions */
+ PCIRegion regions[PCI_NUM_REGIONS];
+ int config_fd;
+} PCIDevRegions;
+
+typedef struct {
+ pcibus_t e_physbase;
+ uint32_t memory_index;
+ union {
+ void *r_virtbase; /* mmapped access address for memory regions */
+ uint32_t r_baseport; /* the base guest port for I/O regions */
+ } u;
+ int num; /* our index within v_addrs[] */
+ pcibus_t e_size; /* emulated size of region in bytes */
+ pcibus_t r_size; /* real size of region in bytes */
+} AssignedDevRegion;
+
+typedef struct AssignedDevice {
+ PCIDevice dev;
+ PCIHostDevice host;
+ uint32_t use_iommu;
+ int intpin;
+ uint8_t debug_flags;
+ AssignedDevRegion v_addrs[PCI_NUM_REGIONS];
+ PCIDevRegions real_device;
+ int run;
+ int girq;
+ unsigned char h_busnr;
+ unsigned int h_devfn;
+ int irq_requested_type;
+ int bound;
+ struct pci_dev *pdev;
+ struct {
+#define ASSIGNED_DEVICE_CAP_MSI (1 << 0)
+#define ASSIGNED_DEVICE_CAP_MSIX (1 << 1)
+ uint32_t available;
+#define ASSIGNED_DEVICE_MSI_ENABLED (1 << 0)
+#define ASSIGNED_DEVICE_MSIX_ENABLED (1 << 1)
+#define ASSIGNED_DEVICE_MSIX_MASKED (1 << 2)
+ uint32_t state;
+ } cap;
+ int irq_entries_nr;
+ struct kvm_irq_routing_entry *entry;
+ void *msix_table_page;
+ target_phys_addr_t msix_table_addr;
+ int mmio_index;
+ int need_emulate_cmd;
+ QLIST_ENTRY(AssignedDevice) next;
+} AssignedDevice;
+
+QemuOpts *add_assigned_device(const char *arg);
+void add_assigned_devices(PCIBus *bus, const char **devices, int n_devices);
+void assigned_dev_update_irqs(void);
+
+#define MAX_DEV_ASSIGN_CMDLINE 8
+
+extern const char *assigned_devices[MAX_DEV_ASSIGN_CMDLINE];
+extern int assigned_devices_index;
+
+#endif /* __DEVICE_ASSIGNMENT_H__ */
diff --git a/hw/extboot.c b/hw/extboot.c
new file mode 100644
index 000000000..b91d54f2f
--- /dev/null
+++ b/hw/extboot.c
@@ -0,0 +1,135 @@
+/*
+ * Extended boot option ROM support.
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hw.h"
+#include "pc.h"
+#include "isa.h"
+#include "block.h"
+
+/* Extended Boot ROM support */
+
+union extboot_cmd
+{
+ uint16_t type;
+ struct {
+ uint16_t type;
+ uint16_t cylinders;
+ uint16_t heads;
+ uint16_t sectors;
+ uint64_t nb_sectors;
+ } query_geometry;
+ struct {
+ uint16_t type;
+ uint16_t nb_sectors;
+ uint16_t segment;
+ uint16_t offset;
+ uint64_t sector;
+ } xfer;
+};
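+
+/* Protocol sketch (as implemented below): the boot ROM writes a 16-bit
+ * real-mode segment to I/O port 0x405; the command block lives at
+ * (segment << 4) in guest memory. type 0x00 queries the geometry, 0x01 reads
+ * sectors into guest memory, 0x02 writes them back to the image. */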
+
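+/* Translation sketch: repeatedly halve the cylinder count and double the head
+ * count until the cylinders fit in 1024. Illustrative (assumed) geometry: a
+ * hint of 4161 cylinders / 16 heads becomes 520 cylinders / 128 heads. */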
+static void get_translated_chs(BlockDriverState *bs, int *c, int *h, int *s)
+{
+ bdrv_get_geometry_hint(bs, c, h, s);
+
+ if (*c <= 1024) {
+ *c >>= 0;
+ *h <<= 0;
+ } else if (*c <= 2048) {
+ *c >>= 1;
+ *h <<= 1;
+ } else if (*c <= 4096) {
+ *c >>= 2;
+ *h <<= 2;
+ } else if (*c <= 8192) {
+ *c >>= 3;
+ *h <<= 3;
+ } else {
+ *c >>= 4;
+ *h <<= 4;
+ }
+
+ /* what is the correct algorithm for this?? */
+ if (*h == 256) {
+ *h = 255;
+ *c = *c + 1;
+ }
+}
+
+static uint32_t extboot_read(void *opaque, uint32_t addr)
+{
+ int *pcmd = opaque;
+ return *pcmd;
+}
+
+static void extboot_write_cmd(void *opaque, uint32_t addr, uint32_t value)
+{
+ union extboot_cmd cmd;
+ BlockDriverState *bs = opaque;
+ int cylinders, heads, sectors, err;
+ uint64_t nb_sectors;
+ target_phys_addr_t pa = 0;
+ int blen = 0;
+ void *buf = NULL;
+
+ cpu_physical_memory_read((value & 0xFFFF) << 4, (uint8_t *)&cmd,
+ sizeof(cmd));
+
+ if (cmd.type == 0x01 || cmd.type == 0x02) {
+ pa = cmd.xfer.segment * 16 + cmd.xfer.offset;
+ blen = cmd.xfer.nb_sectors * 512;
+ buf = qemu_memalign(512, blen);
+ }
+
+ switch (cmd.type) {
+ case 0x00:
+ get_translated_chs(bs, &cylinders, &heads, &sectors);
+ bdrv_get_geometry(bs, &nb_sectors);
+ cmd.query_geometry.cylinders = cylinders;
+ cmd.query_geometry.heads = heads;
+ cmd.query_geometry.sectors = sectors;
+ cmd.query_geometry.nb_sectors = nb_sectors;
+ break;
+ case 0x01:
+ err = bdrv_read(bs, cmd.xfer.sector, buf, cmd.xfer.nb_sectors);
+ if (err)
+ printf("Read failed\n");
+
+ cpu_physical_memory_write(pa, buf, blen);
+
+ break;
+ case 0x02:
+ cpu_physical_memory_read(pa, buf, blen);
+
+ err = bdrv_write(bs, cmd.xfer.sector, buf, cmd.xfer.nb_sectors);
+ if (err)
+ printf("Write failed\n");
+
+ break;
+ }
+
+ cpu_physical_memory_write((value & 0xFFFF) << 4, (uint8_t *)&cmd,
+ sizeof(cmd));
+ if (buf)
+ qemu_free(buf);
+}
+
+void extboot_init(BlockDriverState *bs, int cmd)
+{
+ int *pcmd;
+
+ pcmd = qemu_mallocz(sizeof(int));
+
+ *pcmd = cmd;
+ register_ioport_read(0x404, 1, 1, extboot_read, pcmd);
+ register_ioport_write(0x405, 1, 2, extboot_write_cmd, bs);
+}
diff --git a/hw/hpet.c b/hw/hpet.c
index 6f397110c..92382e7bd 100644
--- a/hw/hpet.c
+++ b/hw/hpet.c
@@ -170,6 +170,11 @@ static int hpet_post_load(void *opaque, int version_id)
/* Recalculate the offset between the main counter and guest time */
s->hpet_offset = ticks_to_ns(s->hpet_counter) - qemu_get_clock(vm_clock);
+
+ if (hpet_in_legacy_mode()) {
+ hpet_disable_pit();
+ }
+
return 0;
}
@@ -473,9 +478,11 @@ static void hpet_ram_writel(void *opaque, target_phys_addr_t addr,
}
/* i8254 and RTC are disabled when HPET is in legacy mode */
if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) {
- hpet_pit_disable();
+ hpet_disable_pit();
+ dprintf("qemu: hpet disabled pit\n");
} else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) {
- hpet_pit_enable();
+ hpet_enable_pit();
+ dprintf("qemu: hpet enabled pit\n");
}
break;
case HPET_CFG + 4:
@@ -559,7 +566,7 @@ static void hpet_reset(void *opaque) {
* hpet_reset is called due to system reset. At this point control must
* be returned to pit until SW reenables hpet.
*/
- hpet_pit_enable();
+ hpet_enable_pit();
count = 1;
}
diff --git a/hw/hw.h b/hw/hw.h
index 7b500f4e4..05131a021 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -342,6 +342,10 @@ extern const VMStateInfo vmstate_info_uint16;
extern const VMStateInfo vmstate_info_uint32;
extern const VMStateInfo vmstate_info_uint64;
+#ifdef __linux__
+extern const VMStateInfo vmstate_info_u64;
+#endif
+
extern const VMStateInfo vmstate_info_timer;
extern const VMStateInfo vmstate_info_ptimer;
extern const VMStateInfo vmstate_info_buffer;
@@ -622,6 +626,15 @@ extern const VMStateDescription vmstate_i2c_slave;
#define VMSTATE_UINT64(_f, _s) \
VMSTATE_UINT64_V(_f, _s, 0)
+/* This is needed because on Linux __u64 is unsigned long long,
+   while in glibc uint64_t is unsigned long on 64-bit hosts */
+#ifdef __linux__
+#define VMSTATE_U64_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_u64, __u64)
+#define VMSTATE_U64(_f, _s) \
+ VMSTATE_U64_V(_f, _s, 0)
+#endif
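+/* Hypothetical usage sketch: a device mirroring a kernel structure with a
+ * __u64 member could describe it as
+ *     VMSTATE_U64(some_kvm_field, SomeDeviceState)
+ * rather than VMSTATE_UINT64, keeping the field type identical to the
+ * kernel's definition. */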
+
#define VMSTATE_UINT8_EQUAL(_f, _s) \
VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint8_equal, uint8_t)
diff --git a/hw/i8254-kvm.c b/hw/i8254-kvm.c
new file mode 100644
index 000000000..c62ab6a09
--- /dev/null
+++ b/hw/i8254-kvm.c
@@ -0,0 +1,122 @@
+/*
+ * QEMU 8253/8254 interval timer emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw.h"
+#include "pc.h"
+#include "isa.h"
+#include "qemu-timer.h"
+#include "i8254.h"
+#include "qemu-kvm.h"
+
+extern VMStateDescription vmstate_pit;
+
+static PITState pit_state;
+
+static void kvm_pit_pre_save(void *opaque)
+{
+ PITState *s = (void *)opaque;
+ struct kvm_pit_state2 pit2;
+ struct kvm_pit_channel_state *c;
+ struct PITChannelState *sc;
+ int i;
+
+ if(qemu_kvm_has_pit_state2()) {
+ kvm_get_pit2(kvm_context, &pit2);
+ s->flags = pit2.flags;
+ } else {
+ /* pit2 is a superset of the pit struct, so just cast it and use it */
+ kvm_get_pit(kvm_context, (struct kvm_pit_state *)&pit2);
+ }
+ for (i = 0; i < 3; i++) {
+ c = &pit2.channels[i];
+ sc = &s->channels[i];
+ sc->count = c->count;
+ sc->latched_count = c->latched_count;
+ sc->count_latched = c->count_latched;
+ sc->status_latched = c->status_latched;
+ sc->status = c->status;
+ sc->read_state = c->read_state;
+ sc->write_state = c->write_state;
+ sc->write_latch = c->write_latch;
+ sc->rw_mode = c->rw_mode;
+ sc->mode = c->mode;
+ sc->bcd = c->bcd;
+ sc->gate = c->gate;
+ sc->count_load_time = c->count_load_time;
+ }
+}
+
+static int kvm_pit_post_load(void *opaque, int version_id)
+{
+ PITState *s = opaque;
+ struct kvm_pit_state2 pit2;
+ struct kvm_pit_channel_state *c;
+ struct PITChannelState *sc;
+ int i;
+
+ pit2.flags = s->flags;
+ for (i = 0; i < 3; i++) {
+ c = &pit2.channels[i];
+ sc = &s->channels[i];
+ c->count = sc->count;
+ c->latched_count = sc->latched_count;
+ c->count_latched = sc->count_latched;
+ c->status_latched = sc->status_latched;
+ c->status = sc->status;
+ c->read_state = sc->read_state;
+ c->write_state = sc->write_state;
+ c->write_latch = sc->write_latch;
+ c->rw_mode = sc->rw_mode;
+ c->mode = sc->mode;
+ c->bcd = sc->bcd;
+ c->gate = sc->gate;
+ c->count_load_time = sc->count_load_time;
+ }
+
+ if(qemu_kvm_has_pit_state2()) {
+ kvm_set_pit2(kvm_context, &pit2);
+ } else {
+ kvm_set_pit(kvm_context, (struct kvm_pit_state *)&pit2);
+ }
+ return 0;
+}
+
+static void dummy_timer(void *opaque)
+{
+}
+
+PITState *kvm_pit_init(int base, qemu_irq irq)
+{
+ PITState *pit = &pit_state;
+ PITChannelState *s;
+
+ s = &pit->channels[0];
+ s->irq_timer = qemu_new_timer(vm_clock, dummy_timer, s);
+ vmstate_pit.pre_save = kvm_pit_pre_save;
+ vmstate_pit.post_load = kvm_pit_post_load;
+ vmstate_register(base, &vmstate_pit, pit);
+ qemu_register_reset(pit_reset, pit);
+ pit_reset(pit);
+
+ return pit;
+}
diff --git a/hw/i8254.c b/hw/i8254.c
index faaa884d9..c4f8d2e5b 100644
--- a/hw/i8254.c
+++ b/hw/i8254.c
@@ -25,38 +25,11 @@
#include "pc.h"
#include "isa.h"
#include "qemu-timer.h"
+#include "qemu-kvm.h"
+#include "i8254.h"
//#define DEBUG_PIT
-#define RW_STATE_LSB 1
-#define RW_STATE_MSB 2
-#define RW_STATE_WORD0 3
-#define RW_STATE_WORD1 4
-
-typedef struct PITChannelState {
- int count; /* can be 65536 */
- uint16_t latched_count;
- uint8_t count_latched;
- uint8_t status_latched;
- uint8_t status;
- uint8_t read_state;
- uint8_t write_state;
- uint8_t write_latch;
- uint8_t rw_mode;
- uint8_t mode;
- uint8_t bcd; /* not supported */
- uint8_t gate; /* timer start */
- int64_t count_load_time;
- /* irq handling */
- int64_t next_transition_time;
- QEMUTimer *irq_timer;
- qemu_irq irq;
-} PITChannelState;
-
-struct PITState {
- PITChannelState channels[3];
-};
-
static PITState pit_state;
static void pit_irq_timer_update(PITChannelState *s, int64_t current_time);
@@ -228,13 +201,18 @@ int pit_get_mode(PITState *pit, int channel)
return s->mode;
}
-static inline void pit_load_count(PITChannelState *s, int val)
+static inline void pit_load_count(PITState *s, int val, int chan)
{
if (val == 0)
val = 0x10000;
- s->count_load_time = qemu_get_clock(vm_clock);
- s->count = val;
- pit_irq_timer_update(s, s->count_load_time);
+ s->channels[chan].count_load_time = qemu_get_clock(vm_clock);
+ s->channels[chan].count = val;
+#ifdef TARGET_I386
+ if (chan == 0 && pit_state.flags & PIT_FLAGS_HPET_LEGACY) {
+ return;
+ }
+#endif
+ pit_irq_timer_update(&s->channels[chan], s->channels[chan].count_load_time);
}
/* if already latched, do not latch again */
@@ -294,17 +272,17 @@ static void pit_ioport_write(void *opaque, uint32_t addr, uint32_t val)
switch(s->write_state) {
default:
case RW_STATE_LSB:
- pit_load_count(s, val);
+ pit_load_count(pit, val, addr);
break;
case RW_STATE_MSB:
- pit_load_count(s, val << 8);
+ pit_load_count(pit, val << 8, addr);
break;
case RW_STATE_WORD0:
s->write_latch = val;
s->write_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- pit_load_count(s, s->write_latch | (val << 8));
+ pit_load_count(pit, s->write_latch | (val << 8), addr);
s->write_state = RW_STATE_WORD0;
break;
}
@@ -364,6 +342,11 @@ static uint32_t pit_ioport_read(void *opaque, uint32_t addr)
return ret;
}
+/* global counters for time-drift fix */
+int64_t timer_acks=0, timer_interrupts=0, timer_ints_to_push=0;
+
+extern int time_drift_fix;
+
static void pit_irq_timer_update(PITChannelState *s, int64_t current_time)
{
int64_t expire_time;
@@ -374,16 +357,35 @@ static void pit_irq_timer_update(PITChannelState *s, int64_t current_time)
expire_time = pit_get_next_transition_time(s, current_time);
irq_level = pit_get_out1(s, current_time);
qemu_set_irq(s->irq, irq_level);
+ if (time_drift_fix && irq_level==1) {
+ /* FIXME: fine tune timer_max_fix (max fix per tick).
+ * Should it be 1 (double time), 2 , 4, 10 ?
+ * Currently setting it to 5% of PIT-ticks-per-second (per PIT-tick)
+ */
+ const long pit_ticks_per_sec = (s->count>0) ? (PIT_FREQ/s->count) : 0;
+ const long timer_max_fix = pit_ticks_per_sec/20;
+ const long delta = timer_interrupts - timer_acks;
+ const long max_delta = pit_ticks_per_sec * 60; /* one minute */
+ if ((delta > max_delta) && (pit_ticks_per_sec > 0)) {
+ printf("time drift is too long, %ld seconds were lost\n", delta/pit_ticks_per_sec);
+ timer_acks = timer_interrupts;
+ timer_ints_to_push = 0;
+ } else if (delta > 0) {
+ timer_ints_to_push = MIN(delta, timer_max_fix);
+ }
+ timer_interrupts++;
+ }
#ifdef DEBUG_PIT
printf("irq_level=%d next_delay=%f\n",
irq_level,
(double)(expire_time - current_time) / get_ticks_per_sec());
#endif
s->next_transition_time = expire_time;
- if (expire_time != -1)
+ if (expire_time != -1) {
qemu_mod_timer(s->irq_timer, expire_time);
- else
+ } else {
qemu_del_timer(s->irq_timer);
+ }
}
static void pit_irq_timer(void *opaque)
@@ -423,9 +425,10 @@ static int pit_load_old(QEMUFile *f, void *opaque, int version_id)
PITChannelState *s;
int i;
- if (version_id != 1)
+ if (version_id != PIT_SAVEVM_VERSION)
return -EINVAL;
+ pit->flags = qemu_get_be32(f);
for(i = 0; i < 3; i++) {
s = &pit->channels[i];
s->count=qemu_get_be32(f);
@@ -446,57 +449,85 @@ static int pit_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_timer(f, s->irq_timer);
}
}
+
return 0;
}
-static const VMStateDescription vmstate_pit = {
+VMStateDescription vmstate_pit = {
.name = "i8254",
.version_id = 2,
.minimum_version_id = 2,
.minimum_version_id_old = 1,
.load_state_old = pit_load_old,
.fields = (VMStateField []) {
+ VMSTATE_UINT32(flags, PITState),
VMSTATE_STRUCT_ARRAY(channels, PITState, 3, 2, vmstate_pit_channel, PITChannelState),
VMSTATE_TIMER(channels[0].irq_timer, PITState),
VMSTATE_END_OF_LIST()
}
};
-static void pit_reset(void *opaque)
+void pit_reset(void *opaque)
{
PITState *pit = opaque;
PITChannelState *s;
int i;
+#ifdef TARGET_I386
+ pit->flags &= ~PIT_FLAGS_HPET_LEGACY;
+#endif
for(i = 0;i < 3; i++) {
s = &pit->channels[i];
s->mode = 3;
s->gate = (i != 2);
- pit_load_count(s, 0);
+ pit_load_count(pit, 0, i);
}
}
+#ifdef TARGET_I386
/* When HPET is operating in legacy mode, i8254 timer0 is disabled */
-void hpet_pit_disable(void) {
- PITChannelState *s;
- s = &pit_state.channels[0];
- if (s->irq_timer)
- qemu_del_timer(s->irq_timer);
+
+void hpet_disable_pit(void)
+{
+ PITChannelState *s = &pit_state.channels[0];
+
+ if (kvm_enabled() && qemu_kvm_pit_in_kernel()) {
+ if (qemu_kvm_has_pit_state2()) {
+ kvm_hpet_disable_kpit();
+ } else {
+ fprintf(stderr, "%s: kvm does not support pit_state2!\n", __FUNCTION__);
+ exit(1);
+ }
+ } else {
+ pit_state.flags |= PIT_FLAGS_HPET_LEGACY;
+ if (s->irq_timer) {
+ qemu_del_timer(s->irq_timer);
+ }
+ }
}
/* When HPET is reset or leaving legacy mode, it must reenable i8254
* timer 0
*/
-void hpet_pit_enable(void)
+void hpet_enable_pit(void)
{
PITState *pit = &pit_state;
- PITChannelState *s;
- s = &pit->channels[0];
- s->mode = 3;
- s->gate = 1;
- pit_load_count(s, 0);
+ PITChannelState *s = &pit->channels[0];
+
+ if (kvm_enabled() && qemu_kvm_pit_in_kernel()) {
+ if (qemu_kvm_has_pit_state2()) {
+ kvm_hpet_enable_kpit();
+ } else {
+ fprintf(stderr, "%s: kvm does not support pit_state2!\n", __FUNCTION__);
+ exit(1);
+ }
+ } else {
+ pit_state.flags &= ~PIT_FLAGS_HPET_LEGACY;
+ pit_load_count(pit, s->count, 0);
+ }
}
+#endif
PITState *pit_init(int base, qemu_irq irq)
{
diff --git a/hw/i8254.h b/hw/i8254.h
new file mode 100644
index 000000000..d23303a8b
--- /dev/null
+++ b/hw/i8254.h
@@ -0,0 +1,69 @@
+/*
+ * QEMU 8253/8254 interval timer emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_I8254_H
+#define QEMU_I8254_H
+
+#define PIT_SAVEVM_NAME "i8254"
+#define PIT_SAVEVM_VERSION 2
+
+#define RW_STATE_LSB 1
+#define RW_STATE_MSB 2
+#define RW_STATE_WORD0 3
+#define RW_STATE_WORD1 4
+
+#define PIT_FLAGS_HPET_LEGACY 1
+
+typedef struct PITChannelState {
+ int count; /* can be 65536 */
+ uint16_t latched_count;
+ uint8_t count_latched;
+ uint8_t status_latched;
+ uint8_t status;
+ uint8_t read_state;
+ uint8_t write_state;
+ uint8_t write_latch;
+ uint8_t rw_mode;
+ uint8_t mode;
+ uint8_t bcd; /* not supported */
+ uint8_t gate; /* timer start */
+ int64_t count_load_time;
+ /* irq handling */
+ int64_t next_transition_time;
+ QEMUTimer *irq_timer;
+ qemu_irq irq;
+} PITChannelState;
+
+struct PITState {
+ PITChannelState channels[3];
+ uint32_t flags;
+};
+
+void pit_save(QEMUFile *f, void *opaque);
+
+int pit_load(QEMUFile *f, void *opaque, int version_id);
+
+void pit_reset(void *opaque);
+
+#endif
diff --git a/hw/i8259.c b/hw/i8259.c
index 3de22e343..7a484c02e 100644
--- a/hw/i8259.c
+++ b/hw/i8259.c
@@ -27,6 +27,8 @@
#include "monitor.h"
#include "qemu-timer.h"
+#include "qemu-kvm.h"
+
/* debug PIC */
//#define DEBUG_PIC
@@ -181,7 +183,6 @@ int64_t irq_time[16];
static void i8259_set_irq(void *opaque, int irq, int level)
{
PicState2 *s = opaque;
-
#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT)
if (level != irq_level[irq]) {
#if defined(DEBUG_PIC)
@@ -212,18 +213,35 @@ static inline void pic_intack(PicState *s, int irq)
} else {
s->isr |= (1 << irq);
}
+
/* We don't clear a level sensitive interrupt here */
if (!(s->elcr & (1 << irq)))
s->irr &= ~(1 << irq);
+
}
+extern int time_drift_fix;
+
int pic_read_irq(PicState2 *s)
{
int irq, irq2, intno;
irq = pic_get_irq(&s->pics[0]);
if (irq >= 0) {
+
pic_intack(&s->pics[0], irq);
+#ifndef TARGET_IA64
+ if (time_drift_fix && irq == 0) {
+ extern int64_t timer_acks, timer_ints_to_push;
+ timer_acks++;
+ if (timer_ints_to_push > 0) {
+ timer_ints_to_push--;
+ /* simulate an edge irq0, like the one generated by i8254 */
+ pic_set_irq1(&s->pics[0], 0, 0);
+ pic_set_irq1(&s->pics[0], 0, 1);
+ }
+ }
+#endif
if (irq == 2) {
irq2 = pic_get_irq(&s->pics[1]);
if (irq2 >= 0) {
@@ -446,9 +464,33 @@ static uint32_t elcr_ioport_read(void *opaque, uint32_t addr1)
return s->elcr;
}
+static void kvm_kernel_pic_save_to_user(PicState *s);
+static int kvm_kernel_pic_load_from_user(PicState *s);
+
+static void pic_pre_save(void *opaque)
+{
+ PicState *s = opaque;
+
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_pic_save_to_user(s);
+ }
+}
+
+static int pic_post_load(void *opaque, int version_id)
+{
+ PicState *s = opaque;
+
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_pic_load_from_user(s);
+ }
+ return 0;
+}
+
static const VMStateDescription vmstate_pic = {
.name = "i8259",
.version_id = 1,
+ .pre_save = pic_pre_save,
+ .post_load = pic_post_load,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
@@ -535,3 +577,103 @@ qemu_irq *i8259_init(qemu_irq parent_irq)
isa_pic = s;
return qemu_allocate_irqs(i8259_set_irq, s, 16);
}
+
+static void kvm_kernel_pic_save_to_user(PicState *s)
+{
+#ifdef KVM_CAP_IRQCHIP
+ struct kvm_irqchip chip;
+ struct kvm_pic_state *kpic;
+
+ chip.chip_id = (&s->pics_state->pics[0] == s) ?
+ KVM_IRQCHIP_PIC_MASTER :
+ KVM_IRQCHIP_PIC_SLAVE;
+ kvm_get_irqchip(kvm_context, &chip);
+ kpic = &chip.chip.pic;
+
+ s->last_irr = kpic->last_irr;
+ s->irr = kpic->irr;
+ s->imr = kpic->imr;
+ s->isr = kpic->isr;
+ s->priority_add = kpic->priority_add;
+ s->irq_base = kpic->irq_base;
+ s->read_reg_select = kpic->read_reg_select;
+ s->poll = kpic->poll;
+ s->special_mask = kpic->special_mask;
+ s->init_state = kpic->init_state;
+ s->auto_eoi = kpic->auto_eoi;
+ s->rotate_on_auto_eoi = kpic->rotate_on_auto_eoi;
+ s->special_fully_nested_mode = kpic->special_fully_nested_mode;
+ s->init4 = kpic->init4;
+ s->elcr = kpic->elcr;
+ s->elcr_mask = kpic->elcr_mask;
+#endif
+}
+
+static int kvm_kernel_pic_load_from_user(PicState *s)
+{
+#ifdef KVM_CAP_IRQCHIP
+ struct kvm_irqchip chip;
+ struct kvm_pic_state *kpic;
+
+ chip.chip_id = (&s->pics_state->pics[0] == s) ?
+ KVM_IRQCHIP_PIC_MASTER :
+ KVM_IRQCHIP_PIC_SLAVE;
+ kpic = &chip.chip.pic;
+
+ kpic->last_irr = s->last_irr;
+ kpic->irr = s->irr;
+ kpic->imr = s->imr;
+ kpic->isr = s->isr;
+ kpic->priority_add = s->priority_add;
+ kpic->irq_base = s->irq_base;
+ kpic->read_reg_select = s->read_reg_select;
+ kpic->poll = s->poll;
+ kpic->special_mask = s->special_mask;
+ kpic->init_state = s->init_state;
+ kpic->auto_eoi = s->auto_eoi;
+ kpic->rotate_on_auto_eoi = s->rotate_on_auto_eoi;
+ kpic->special_fully_nested_mode = s->special_fully_nested_mode;
+ kpic->init4 = s->init4;
+ kpic->elcr = s->elcr;
+ kpic->elcr_mask = s->elcr_mask;
+
+ kvm_set_irqchip(kvm_context, &chip);
+#endif
+ return 0;
+}
+
+#ifdef KVM_CAP_IRQCHIP
+static void kvm_i8259_set_irq(void *opaque, int irq, int level)
+{
+ int pic_ret;
+ if (kvm_set_irq(irq, level, &pic_ret)) {
+ if (pic_ret != 0)
+ apic_set_irq_delivered();
+ return;
+ }
+}
+
+static void kvm_pic_init1(int io_addr, PicState *s)
+{
+ vmstate_register(io_addr, &vmstate_pic, s);
+ qemu_register_reset(pic_reset, s);
+}
+
+qemu_irq *kvm_i8259_init(qemu_irq parent_irq)
+{
+ PicState2 *s;
+
+ s = qemu_mallocz(sizeof(PicState2));
+
+ kvm_pic_init1(0x20, &s->pics[0]);
+ kvm_pic_init1(0xa0, &s->pics[1]);
+ s->parent_irq = parent_irq;
+ s->pics[0].pics_state = s;
+ s->pics[1].pics_state = s;
+ isa_pic = s;
+ return qemu_allocate_irqs(kvm_i8259_set_irq, s, 24);
+}
+#endif
+
+
+
diff --git a/hw/ioapic.c b/hw/ioapic.c
index b0ad78f24..a66325d42 100644
--- a/hw/ioapic.c
+++ b/hw/ioapic.c
@@ -22,12 +22,16 @@
#include "hw.h"
#include "pc.h"
+#include "sysemu.h"
#include "qemu-timer.h"
#include "host-utils.h"
+#include "qemu-kvm.h"
+
//#define DEBUG_IOAPIC
#define IOAPIC_NUM_PINS 0x18
+#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
#define IOAPIC_LVT_MASKED (1<<16)
#define IOAPIC_TRIGGER_EDGE 0
@@ -45,6 +49,7 @@
struct IOAPICState {
uint8_t id;
uint8_t ioregsel;
+ uint64_t base_address;
uint32_t irr;
uint64_t ioredtbl[IOAPIC_NUM_PINS];
@@ -94,8 +99,9 @@ void ioapic_set_irq(void *opaque, int vector, int level)
* to GSI 2. GSI maps to ioapic 1-1. This is not
* the cleanest way of doing it but it should work. */
- if (vector == 0)
+ if (vector == 0 && irq0override) {
vector = 2;
+ }
if (vector >= 0 && vector < IOAPIC_NUM_PINS) {
uint32_t mask = 1 << vector;
@@ -191,14 +197,91 @@ static void ioapic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t va
}
}
+static void kvm_kernel_ioapic_save_to_user(IOAPICState *s)
+{
+#if defined(KVM_CAP_IRQCHIP) && defined(TARGET_I386)
+ struct kvm_irqchip chip;
+ struct kvm_ioapic_state *kioapic;
+ int i;
+
+ chip.chip_id = KVM_IRQCHIP_IOAPIC;
+ kvm_get_irqchip(kvm_context, &chip);
+ kioapic = &chip.chip.ioapic;
+
+ s->id = kioapic->id;
+ s->ioregsel = kioapic->ioregsel;
+ s->base_address = kioapic->base_address;
+ s->irr = kioapic->irr;
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ s->ioredtbl[i] = kioapic->redirtbl[i].bits;
+ }
+#endif
+}
+
+static void kvm_kernel_ioapic_load_from_user(IOAPICState *s)
+{
+#if defined(KVM_CAP_IRQCHIP) && defined(TARGET_I386)
+ struct kvm_irqchip chip;
+ struct kvm_ioapic_state *kioapic;
+ int i;
+
+ chip.chip_id = KVM_IRQCHIP_IOAPIC;
+ kioapic = &chip.chip.ioapic;
+
+ kioapic->id = s->id;
+ kioapic->ioregsel = s->ioregsel;
+ kioapic->base_address = s->base_address;
+ kioapic->irr = s->irr;
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ kioapic->redirtbl[i].bits = s->ioredtbl[i];
+ }
+
+ kvm_set_irqchip(kvm_context, &chip);
+#endif
+}
+
+static void ioapic_pre_save(void *opaque)
+{
+ IOAPICState *s = (void *)opaque;
+
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_ioapic_save_to_user(s);
+ }
+}
+
+static int ioapic_pre_load(void *opaque)
+{
+ IOAPICState *s = opaque;
+
+ /* in case we are doing version 1, we just set these to sane values */
+ s->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
+ s->irr = 0;
+ return 0;
+}
+
+static int ioapic_post_load(void *opaque, int version_id)
+{
+ IOAPICState *s = opaque;
+
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_ioapic_load_from_user(s);
+ }
+ return 0;
+}
+
static const VMStateDescription vmstate_ioapic = {
.name = "ioapic",
- .version_id = 1,
+ .version_id = 2,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
+ .pre_load = ioapic_pre_load,
+ .post_load = ioapic_post_load,
+ .pre_save = ioapic_pre_save,
.fields = (VMStateField []) {
VMSTATE_UINT8(id, IOAPICState),
VMSTATE_UINT8(ioregsel, IOAPICState),
+ VMSTATE_UINT64_V(base_address, IOAPICState, 2),
+ VMSTATE_UINT32_V(irr, IOAPICState, 2),
VMSTATE_UINT64_ARRAY(ioredtbl, IOAPICState, IOAPIC_NUM_PINS),
VMSTATE_END_OF_LIST()
}
@@ -210,8 +293,14 @@ static void ioapic_reset(void *opaque)
int i;
memset(s, 0, sizeof(*s));
+ s->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
for(i = 0; i < IOAPIC_NUM_PINS; i++)
s->ioredtbl[i] = 1 << 16; /* mask LVT */
+#ifdef KVM_CAP_IRQCHIP
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_kernel_ioapic_load_from_user(s);
+ }
+#endif
}
static CPUReadMemoryFunc * const ioapic_mem_read[3] = {
diff --git a/hw/ipf.c b/hw/ipf.c
new file mode 100644
index 000000000..21cff72b7
--- /dev/null
+++ b/hw/ipf.c
@@ -0,0 +1,713 @@
+/*
+ * Itanium Platform Emulator derived from QEMU PC System Emulator
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Copyright (c) 2007 Intel
+ * Ported for IA64 Platform Zhang Xiantao <xiantao.zhang@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw.h"
+#include "pc.h"
+#include "fdc.h"
+#include "pci.h"
+#include "block.h"
+#include "sysemu.h"
+#include "audio/audio.h"
+#include "net.h"
+#include "smbus.h"
+#include "boards.h"
+#include "firmware.h"
+#include "ia64intrin.h"
+#include <unistd.h>
+#include "device-assignment.h"
+#include "virtio-blk.h"
+
+#include "qemu-kvm.h"
+
+#define FW_FILENAME "Flash.fd"
+
+/* Leave a chunk of memory at the top of RAM for the BIOS ACPI tables. */
+#define ACPI_DATA_SIZE 0x10000
+
+#define MAX_IDE_BUS 2
+
+static fdctrl_t *floppy_controller;
+static RTCState *rtc_state;
+static PCIDevice *i440fx_state;
+
+static uint32_t ipf_to_legacy_io(target_phys_addr_t addr)
+{
+ return (uint32_t)(((addr&0x3ffffff) >> 12 << 2)|((addr) & 0x3));
+}
+
+static void ipf_legacy_io_writeb(void *opaque, target_phys_addr_t addr,
+ uint32_t val) {
+ uint32_t port = ipf_to_legacy_io(addr);
+
+ cpu_outb(0, port, val);
+}
+
+static void ipf_legacy_io_writew(void *opaque, target_phys_addr_t addr,
+ uint32_t val) {
+ uint32_t port = ipf_to_legacy_io(addr);
+
+ cpu_outw(0, port, val);
+}
+
+static void ipf_legacy_io_writel(void *opaque, target_phys_addr_t addr,
+ uint32_t val) {
+ uint32_t port = ipf_to_legacy_io(addr);
+
+ cpu_outl(0, port, val);
+}
+
+static uint32_t ipf_legacy_io_readb(void *opaque, target_phys_addr_t addr)
+{
+ uint32_t port = ipf_to_legacy_io(addr);
+
+ return cpu_inb(0, port);
+}
+
+static uint32_t ipf_legacy_io_readw(void *opaque, target_phys_addr_t addr)
+{
+ uint32_t port = ipf_to_legacy_io(addr);
+
+ return cpu_inw(0, port);
+}
+
+static uint32_t ipf_legacy_io_readl(void *opaque, target_phys_addr_t addr)
+{
+ uint32_t port = ipf_to_legacy_io(addr);
+
+ return cpu_inl(0, port);
+}
+
+static CPUReadMemoryFunc *ipf_legacy_io_read[3] = {
+ ipf_legacy_io_readb,
+ ipf_legacy_io_readw,
+ ipf_legacy_io_readl,
+};
+
+static CPUWriteMemoryFunc *ipf_legacy_io_write[3] = {
+ ipf_legacy_io_writeb,
+ ipf_legacy_io_writew,
+ ipf_legacy_io_writel,
+};
+
+static void pic_irq_request(void *opaque, int irq, int level)
+{
+ fprintf(stderr,"pic_irq_request called!\n");
+}
+
+/* PC cmos mappings */
+
+#define REG_EQUIPMENT_BYTE 0x14
+
+static int cmos_get_fd_drive_type(int fd0)
+{
+ int val;
+
+ switch (fd0) {
+ case 0:
+ /* 1.44 Mb 3"5 drive */
+ val = 4;
+ break;
+ case 1:
+ /* 2.88 Mb 3"5 drive */
+ val = 5;
+ break;
+ case 2:
+ /* 1.2 Mb 5"5 drive */
+ val = 2;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+ return val;
+}
+
+static void cmos_init_hd(int type_ofs, int info_ofs, BlockDriverState *hd)
+{
+ RTCState *s = rtc_state;
+ int cylinders, heads, sectors;
+
+ bdrv_get_geometry_hint(hd, &cylinders, &heads, &sectors);
+ rtc_set_memory(s, type_ofs, 47);
+ rtc_set_memory(s, info_ofs, cylinders);
+ rtc_set_memory(s, info_ofs + 1, cylinders >> 8);
+ rtc_set_memory(s, info_ofs + 2, heads);
+ rtc_set_memory(s, info_ofs + 3, 0xff);
+ rtc_set_memory(s, info_ofs + 4, 0xff);
+ rtc_set_memory(s, info_ofs + 5, 0xc0 | ((heads > 8) << 3));
+ rtc_set_memory(s, info_ofs + 6, cylinders);
+ rtc_set_memory(s, info_ofs + 7, cylinders >> 8);
+ rtc_set_memory(s, info_ofs + 8, sectors);
+}
+
+/* convert boot_device letter to something recognizable by the bios */
+static int boot_device2nibble(char boot_device)
+{
+ switch(boot_device) {
+ case 'a':
+ case 'b':
+ return 0x01; /* floppy boot */
+ case 'c':
+ return 0x02; /* hard drive boot */
+ case 'd':
+ return 0x03; /* CD-ROM boot */
+ case 'n':
+ return 0x04; /* Network boot */
+ }
+ return 0;
+}
+
+/* hd_table must contain 4 block drivers */
+static void cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size,
+ const char *boot_device, BlockDriverState **hd_table)
+{
+ RTCState *s = rtc_state;
+ int nbds, bds[3] = { 0, };
+ int val;
+ int fd0, fd1, nb;
+ int i;
+
+ /* various important CMOS locations needed by PC/Bochs bios */
+
+ /* memory size */
+ val = 640; /* base memory in K */
+ rtc_set_memory(s, 0x15, val);
+ rtc_set_memory(s, 0x16, val >> 8);
+
+ val = (ram_size / 1024) - 1024;
+ if (val > 65535)
+ val = 65535;
+ rtc_set_memory(s, 0x17, val);
+ rtc_set_memory(s, 0x18, val >> 8);
+ rtc_set_memory(s, 0x30, val);
+ rtc_set_memory(s, 0x31, val >> 8);
+
+ if (above_4g_mem_size) {
+ rtc_set_memory(s, 0x5b, (unsigned int)above_4g_mem_size >> 16);
+ rtc_set_memory(s, 0x5c, (unsigned int)above_4g_mem_size >> 24);
+ rtc_set_memory(s, 0x5d, above_4g_mem_size >> 32);
+ }
+ rtc_set_memory(s, 0x5f, smp_cpus - 1);
+
+ if (ram_size > (16 * 1024 * 1024))
+ val = (ram_size / 65536) - ((16 * 1024 * 1024) / 65536);
+ else
+ val = 0;
+ if (val > 65535)
+ val = 65535;
+ rtc_set_memory(s, 0x34, val);
+ rtc_set_memory(s, 0x35, val >> 8);
+
+ /* set boot devices, and disable floppy signature check if requested */
+#define PC_MAX_BOOT_DEVICES 3
+ nbds = strlen(boot_device);
+
+ if (nbds > PC_MAX_BOOT_DEVICES) {
+ fprintf(stderr, "Too many boot devices for PC\n");
+ exit(1);
+ }
+
+ for (i = 0; i < nbds; i++) {
+ bds[i] = boot_device2nibble(boot_device[i]);
+ if (bds[i] == 0) {
+ fprintf(stderr, "Invalid boot device for PC: '%c'\n",
+ boot_device[i]);
+ exit(1);
+ }
+ }
+
+ rtc_set_memory(s, 0x3d, (bds[1] << 4) | bds[0]);
+ rtc_set_memory(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1));
+
+ /* floppy type */
+
+ fd0 = fdctrl_get_drive_type(floppy_controller, 0);
+ fd1 = fdctrl_get_drive_type(floppy_controller, 1);
+
+ val = (cmos_get_fd_drive_type(fd0) << 4) | cmos_get_fd_drive_type(fd1);
+ rtc_set_memory(s, 0x10, val);
+
+ val = 0;
+ nb = 0;
+ if (fd0 < 3)
+ nb++;
+ if (fd1 < 3)
+ nb++;
+
+ switch (nb) {
+ case 0:
+ break;
+ case 1:
+ val |= 0x01; /* 1 drive, ready for boot */
+ break;
+ case 2:
+ val |= 0x41; /* 2 drives, ready for boot */
+ break;
+ }
+
+ val |= 0x02; /* FPU is there */
+ val |= 0x04; /* PS/2 mouse installed */
+ rtc_set_memory(s, REG_EQUIPMENT_BYTE, val);
+
+ /* hard drives */
+
+ rtc_set_memory(s, 0x12, (hd_table[0] ? 0xf0 : 0) | (hd_table[1] ? 0x0f : 0));
+ if (hd_table[0])
+ cmos_init_hd(0x19, 0x1b, hd_table[0]);
+ if (hd_table[1])
+ cmos_init_hd(0x1a, 0x24, hd_table[1]);
+
+ val = 0;
+ for (i = 0; i < 4; i++) {
+ if (hd_table[i]) {
+ int cylinders, heads, sectors, translation;
+ /* NOTE: bdrv_get_geometry_hint() returns the physical
+ geometry. It is always such that: 1 <= sects <= 63, 1
+ <= heads <= 16, 1 <= cylinders <= 16383. The BIOS
+ geometry can be different if a translation is done. */
+ translation = bdrv_get_translation_hint(hd_table[i]);
+ if (translation == BIOS_ATA_TRANSLATION_AUTO) {
+ bdrv_get_geometry_hint(hd_table[i], &cylinders,
+ &heads, &sectors);
+ if (cylinders <= 1024 && heads <= 16 && sectors <= 63) {
+ /* No translation. */
+ translation = 0;
+ } else {
+ /* LBA translation. */
+ translation = 1;
+ }
+ } else {
+ translation--;
+ }
+ val |= translation << (i * 2);
+ }
+ }
+ rtc_set_memory(s, 0x39, val);
+}
+
+static void main_cpu_reset(void *opaque)
+{
+ CPUState *env = opaque;
+ cpu_reset(env);
+}
+
+static const int ide_iobase[2] = { 0x1f0, 0x170 };
+static const int ide_iobase2[2] = { 0x3f6, 0x376 };
+static const int ide_irq[2] = { 14, 15 };
+
+#define NE2000_NB_MAX 6
+
+static int ne2000_io[NE2000_NB_MAX] = { 0x300, 0x320, 0x340,
+ 0x360, 0x280, 0x380 };
+static int ne2000_irq[NE2000_NB_MAX] = { 9, 10, 11, 3, 4, 5 };
+
+static int serial_io[MAX_SERIAL_PORTS] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+static int serial_irq[MAX_SERIAL_PORTS] = { 4, 3, 4, 3 };
+
+static int parallel_io[MAX_PARALLEL_PORTS] = { 0x378, 0x278, 0x3bc };
+static int parallel_irq[MAX_PARALLEL_PORTS] = { 7, 7, 7 };
+
+#ifdef HAS_AUDIO
+static void audio_init (PCIBus *pci_bus, qemu_irq *pic)
+{
+ struct soundhw *c;
+ int audio_enabled = 0;
+
+ for (c = soundhw; !audio_enabled && c->name; ++c) {
+ audio_enabled = c->enabled;
+ }
+
+ if (audio_enabled) {
+ AudioState *s;
+
+ s = AUD_init ();
+ if (s) {
+ for (c = soundhw; c->name; ++c) {
+ if (c->enabled) {
+ if (c->isa) {
+ c->init.init_isa (s, pic);
+ } else {
+ if (pci_bus) {
+ c->init.init_pci (pci_bus, s);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+#endif
+
+static void pc_init_ne2k_isa(NICInfo *nd, qemu_irq *pic)
+{
+ static int nb_ne2k = 0;
+
+ if (nb_ne2k == NE2000_NB_MAX)
+ return;
+ isa_ne2000_init(ne2000_io[nb_ne2k], pic[ne2000_irq[nb_ne2k]], nd);
+ nb_ne2k++;
+}
+
+/* Itanium hardware initialisation */
+static void ipf_init1(ram_addr_t ram_size,
+ const char *boot_device, DisplayState *ds,
+ const char *kernel_filename, const char *kernel_cmdline,
+ const char *initrd_filename,
+ int pci_enabled, const char *cpu_model)
+{
+ char buf[1024];
+ int i;
+ ram_addr_t ram_addr;
+ ram_addr_t above_4g_mem_size = 0;
+ PCIBus *pci_bus;
+ PCIDevice *pci_dev;
+ int piix3_devfn = -1;
+ CPUState *env;
+ qemu_irq *cpu_irq;
+ qemu_irq *i8259;
+ int page_size;
+ int index;
+ unsigned long ipf_legacy_io_base, ipf_legacy_io_mem;
+ BlockDriverState *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
+ BlockDriverState *fd[MAX_FD];
+
+ page_size = getpagesize();
+ if (page_size != TARGET_PAGE_SIZE) {
+ fprintf(stderr,"Error! Host page size != qemu target page size,"
+ " you may need to change TARGET_PAGE_BITS in qemu!"
+ "host page size:0x%x\n", page_size);
+ exit(-1);
+ };
+
+ if (ram_size >= 0xc0000000 ) {
+ above_4g_mem_size = ram_size - 0xc0000000;
+ ram_size = 0xc0000000;
+ }
+
+ /* init CPUs */
+ if (cpu_model == NULL) {
+ cpu_model = "IA64";
+ }
+
+ for(i = 0; i < smp_cpus; i++) {
+ env = cpu_init(cpu_model);
+ if (!env) {
+ fprintf(stderr, "Unable to find CPU definition\n");
+ exit(1);
+ }
+ if (i != 0)
+ env->hflags |= HF_HALTED_MASK;
+ register_savevm("cpu", i, 4, cpu_save, cpu_load, env);
+ qemu_register_reset(main_cpu_reset, 0, env);
+ }
+
+ /* allocate RAM */
+ if (kvm_enabled()) {
+ ram_addr = qemu_ram_alloc(0xa0000);
+ cpu_register_physical_memory(0, 0xa0000, ram_addr);
+
+ ram_addr = qemu_ram_alloc(0x20000); // Workaround 0xa0000-0xc0000
+
+ ram_addr = qemu_ram_alloc(0x40000);
+ cpu_register_physical_memory(0xc0000, 0x40000, ram_addr);
+
+ ram_addr = qemu_ram_alloc(ram_size - 0x100000);
+ cpu_register_physical_memory(0x100000, ram_size - 0x100000, ram_addr);
+ } else {
+ ram_addr = qemu_ram_alloc(ram_size);
+ cpu_register_physical_memory(0, ram_size, ram_addr);
+ }
+
+ /* above 4giga memory allocation */
+ if (above_4g_mem_size > 0) {
+ ram_addr = qemu_ram_alloc(above_4g_mem_size);
+ cpu_register_physical_memory(0x100000000, above_4g_mem_size, ram_addr);
+ }
+
+ /* Load firmware to its proper position. */
+ if (kvm_enabled()) {
+ unsigned long image_size;
+ uint8_t *image = NULL;
+ unsigned long nvram_addr;
+ unsigned long nvram_fd = 0;
+ unsigned long type = READ_FROM_NVRAM;
+ unsigned long i = 0;
+ unsigned long fw_offset;
+ ram_addr_t fw_mem = qemu_ram_alloc(GFW_SIZE);
+
+ snprintf(buf, sizeof(buf), "%s/%s", bios_dir, FW_FILENAME);
+ image = read_image(buf, &image_size );
+ if (NULL == image || !image_size) {
+ fprintf(stderr, "Error when reading Guest Firmware!\n");
+ fprintf(stderr, "Please check Guest firmware at %s\n", buf);
+ exit(1);
+ }
+ fw_offset = GFW_START + GFW_SIZE - image_size;
+
+ cpu_register_physical_memory(GFW_START, GFW_SIZE, fw_mem);
+ cpu_physical_memory_write(fw_offset, image, image_size);
+
+ free(image);
+
+ if (nvram) {
+ nvram_addr = NVRAM_START;
+ nvram_fd = kvm_ia64_nvram_init(type);
+ if (nvram_fd != -1) {
+ kvm_ia64_copy_from_nvram_to_GFW(nvram_fd);
+ close(nvram_fd);
+ }
+ i = atexit((void *)kvm_ia64_copy_from_GFW_to_nvram);
+ if (i != 0)
+ fprintf(stderr, "cannot set exit function\n");
+ } else
+ nvram_addr = 0;
+
+ kvm_ia64_build_hob(ram_size + above_4g_mem_size, smp_cpus, nvram_addr);
+ }
+
+ /* Register legacy I/O address space, size: 64M */
+ ipf_legacy_io_base = 0xE0000000;
+ ipf_legacy_io_mem = cpu_register_io_memory(0, ipf_legacy_io_read,
+ ipf_legacy_io_write, NULL);
+ cpu_register_physical_memory(ipf_legacy_io_base, 64*1024*1024,
+ ipf_legacy_io_mem);
+
+ cpu_irq = qemu_allocate_irqs(pic_irq_request, first_cpu, 1);
+ i8259 = kvm_i8259_init(cpu_irq[0]);
+
+ if (pci_enabled) {
+ pci_bus = i440fx_init(&i440fx_state, i8259);
+ piix3_devfn = piix3_init(pci_bus, -1);
+ } else {
+ pci_bus = NULL;
+ }
+
+ if (cirrus_vga_enabled) {
+ if (pci_enabled)
+ pci_cirrus_vga_init(pci_bus);
+ else
+ isa_cirrus_vga_init();
+ } else {
+ if (pci_enabled)
+ pci_vga_init(pci_bus, 0, 0);
+ else
+ isa_vga_init();
+ }
+
+ rtc_state = rtc_init(0x70, i8259[8], 2000);
+
+ if (pci_enabled) {
+ pic_set_alt_irq_func(isa_pic, NULL, NULL);
+ }
+
+ for(i = 0; i < MAX_SERIAL_PORTS; i++) {
+ if (serial_hds[i]) {
+ serial_init(serial_io[i], i8259[serial_irq[i]], 115200,
+ serial_hds[i]);
+ }
+ }
+
+ for(i = 0; i < MAX_PARALLEL_PORTS; i++) {
+ if (parallel_hds[i]) {
+ parallel_init(parallel_io[i], i8259[parallel_irq[i]],
+ parallel_hds[i]);
+ }
+ }
+
+ for(i = 0; i < nb_nics; i++) {
+ NICInfo *nd = &nd_table[i];
+
+ if (!pci_enabled || (nd->model && strcmp(nd->model, "ne2k_isa") == 0))
+ pc_init_ne2k_isa(nd, i8259);
+ else
+ pci_nic_init(nd, "e1000", NULL);
+ }
+
+#undef USE_HYPERCALL // Disabled for now; needs to be implemented later.
+#ifdef USE_HYPERCALL
+ pci_hypercall_init(pci_bus);
+#endif
+
+ if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) {
+ fprintf(stderr, "qemu: too many IDE bus\n");
+ exit(1);
+ }
+
+ for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) {
+ index = drive_get_index(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS);
+ if (index != -1)
+ hd[i] = drives_table[index].bdrv;
+ else
+ hd[i] = NULL;
+ }
+
+ if (pci_enabled) {
+ pci_piix3_ide_init(pci_bus, hd, piix3_devfn + 1, i8259);
+ } else {
+ for(i = 0; i < MAX_IDE_BUS; i++) {
+ isa_ide_init(ide_iobase[i], ide_iobase2[i], i8259[ide_irq[i]],
+ hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]);
+ }
+ }
+
+ i8042_init(i8259[1], i8259[12], 0x60);
+ DMA_init(0);
+#ifdef HAS_AUDIO
+ audio_init(pci_enabled ? pci_bus : NULL, i8259);
+#endif
+
+ for(i = 0; i < MAX_FD; i++) {
+ index = drive_get_index(IF_FLOPPY, 0, i);
+ if (index != -1)
+ fd[i] = drives_table[index].bdrv;
+ else
+ fd[i] = NULL;
+ }
+ floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd);
+
+ cmos_init(ram_size, above_4g_mem_size, boot_device, hd);
+
+ if (pci_enabled && usb_enabled) {
+ usb_uhci_piix3_init(pci_bus, piix3_devfn + 2);
+ }
+
+ if (pci_enabled && acpi_enabled) {
+ uint8_t *eeprom_buf = qemu_mallocz(8 * 256); /* XXX: make this persistent */
+ i2c_bus *smbus;
+
+ /* TODO: Populate SPD eeprom data. */
+ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, i8259[9]);
+ for (i = 0; i < 8; i++) {
+ DeviceState *eeprom;
+ eeprom = qdev_create((BusState *)smbus, "smbus-eeprom");
+ qdev_set_prop_int(eeprom, "address", 0x50 + i);
+ qdev_set_prop_ptr(eeprom, "data", eeprom_buf + (i * 256));
+ qdev_init(eeprom);
+ }
+ }
+
+ if (i440fx_state) {
+ i440fx_init_memory_mappings(i440fx_state);
+ }
+
+ if (pci_enabled) {
+ int max_bus;
+ int bus;
+
+ max_bus = drive_get_max_bus(IF_SCSI);
+ for (bus = 0; bus <= max_bus; bus++) {
+ pci_create_simple(pci_bus, -1, "lsi53c895a");
+ }
+ }
+ /* Add virtio block devices */
+ if (pci_enabled) {
+ int index;
+ int unit_id = 0;
+
+ while ((index = drive_get_index(IF_VIRTIO, 0, unit_id)) != -1) {
+ pci_dev = pci_create("virtio-blk-pci",
+ drives_table[index].devaddr);
+ qdev_init(&pci_dev->qdev);
+ unit_id++;
+ }
+ }
+
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+ if (kvm_enabled())
+ add_assigned_devices(pci_bus, assigned_devices, assigned_devices_index);
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
+
+}
+
+static void ipf_init_pci(ram_addr_t ram_size,
+ const char *boot_device, DisplayState *ds,
+ const char *kernel_filename,
+ const char *kernel_cmdline,
+ const char *initrd_filename,
+ const char *cpu_model)
+{
+ ipf_init1(ram_size, boot_device, ds, kernel_filename,
+ kernel_cmdline, initrd_filename, 1, cpu_model);
+}
+
+QEMUMachine ipf_machine = {
+ .name = "itanium",
+ .desc = "Itanium Platform",
+ .init = (QEMUMachineInitFunc *)ipf_init_pci,
+ .max_cpus = 255,
+ .is_default = 1,
+};
+
+static void ipf_machine_init(void)
+{
+ qemu_register_machine(&ipf_machine);
+}
+
+machine_init(ipf_machine_init);
+
+#define IOAPIC_NUM_PINS 48
+
+static int ioapic_irq_count[IOAPIC_NUM_PINS];
+
+static int ioapic_map_irq(int devfn, int irq_num)
+{
+ int irq, dev;
+ dev = devfn >> 3;
+ irq = ((((dev << 2) + (dev >> 3) + irq_num) & 31) + 16);
+ return irq;
+}
+
+/*
+ * Dummy function to provide match for call from hw/apic.c
+ */
+void apic_set_irq_delivered(void) {
+}
+
+void ioapic_set_irq(void *opaque, int irq_num, int level)
+{
+ int vector, pic_ret;
+
+ PCIDevice *pci_dev = (PCIDevice *)opaque;
+ vector = ioapic_map_irq(pci_dev->devfn, irq_num);
+
+ if (level)
+ ioapic_irq_count[vector] += 1;
+ else
+ ioapic_irq_count[vector] -= 1;
+
+ if (kvm_enabled()) {
+ if (kvm_set_irq(vector, ioapic_irq_count[vector] == 0, &pic_ret))
+ if (pic_ret != 0)
+ apic_set_irq_delivered();
+ return;
+ }
+}
+
+int ipf_map_irq(PCIDevice *pci_dev, int irq_num)
+{
+ return ioapic_map_irq(pci_dev->devfn, irq_num);
+}
diff --git a/hw/msix.c b/hw/msix.c
index 0baedef42..d117bcfaa 100644
--- a/hw/msix.c
+++ b/hw/msix.c
@@ -14,6 +14,8 @@
#include "hw.h"
#include "msix.h"
#include "pci.h"
+#define QEMU_KVM_NO_CPU
+#include "qemu-kvm.h"
/* Declaration from linux/pci_regs.h */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
@@ -62,6 +64,117 @@
/* Flag for interrupt controller to declare MSI-X support */
int msix_supported;
+#ifdef CONFIG_KVM
+/* KVM specific MSIX helpers */
+static void kvm_msix_free(PCIDevice *dev)
+{
+ int vector, changed = 0;
+ for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
+ if (dev->msix_entry_used[vector]) {
+ kvm_del_routing_entry(kvm_context, &dev->msix_irq_entries[vector]);
+ changed = 1;
+ }
+ }
+ if (changed) {
+ kvm_commit_irq_routes(kvm_context);
+ }
+}
+
+static void kvm_msix_routing_entry(PCIDevice *dev, unsigned vector,
+ struct kvm_irq_routing_entry *entry)
+{
+ uint8_t *table_entry = dev->msix_table_page + vector * MSIX_ENTRY_SIZE;
+ entry->type = KVM_IRQ_ROUTING_MSI;
+ entry->flags = 0;
+ entry->u.msi.address_lo = pci_get_long(table_entry + MSIX_MSG_ADDR);
+ entry->u.msi.address_hi = pci_get_long(table_entry + MSIX_MSG_UPPER_ADDR);
+ entry->u.msi.data = pci_get_long(table_entry + MSIX_MSG_DATA);
+}
+
+static void kvm_msix_update(PCIDevice *dev, int vector,
+ int was_masked, int is_masked)
+{
+ struct kvm_irq_routing_entry e = {}, *entry;
+ int mask_cleared = was_masked && !is_masked;
+ /* It is only legal to change an entry when it is masked. Therefore, it is
+ * enough to update the routing in kernel when mask is being cleared. */
+ if (!mask_cleared) {
+ return;
+ }
+ if (!dev->msix_entry_used[vector]) {
+ return;
+ }
+ entry = dev->msix_irq_entries + vector;
+ e.gsi = entry->gsi;
+ kvm_msix_routing_entry(dev, vector, &e);
+ if (memcmp(&entry->u.msi, &e.u.msi, sizeof entry->u.msi)) {
+ int r;
+ r = kvm_update_routing_entry(kvm_context, entry, &e);
+ if (r) {
+ fprintf(stderr, "%s: kvm_update_routing_entry failed: %s\n", __func__,
+ strerror(-r));
+ exit(1);
+ }
+ memcpy(&entry->u.msi, &e.u.msi, sizeof entry->u.msi);
+ r = kvm_commit_irq_routes(kvm_context);
+ if (r) {
+ fprintf(stderr, "%s: kvm_commit_irq_routes failed: %s\n", __func__,
+ strerror(-r));
+ exit(1);
+ }
+ }
+}
+
+static int kvm_msix_add(PCIDevice *dev, unsigned vector)
+{
+ struct kvm_irq_routing_entry *entry = dev->msix_irq_entries + vector;
+ int r;
+
+ if (!kvm_has_gsi_routing(kvm_context)) {
+ fprintf(stderr, "Warning: no MSI-X support found. "
+ "At least kernel 2.6.30 is required for MSI-X support.\n"
+ );
+ return -EOPNOTSUPP;
+ }
+
+ r = kvm_get_irq_route_gsi(kvm_context);
+ if (r < 0) {
+ fprintf(stderr, "%s: kvm_get_irq_route_gsi failed: %s\n", __func__, strerror(-r));
+ return r;
+ }
+ entry->gsi = r;
+ kvm_msix_routing_entry(dev, vector, entry);
+ r = kvm_add_routing_entry(kvm_context, entry);
+ if (r < 0) {
+ fprintf(stderr, "%s: kvm_add_routing_entry failed: %s\n", __func__, strerror(-r));
+ return r;
+ }
+
+ r = kvm_commit_irq_routes(kvm_context);
+ if (r < 0) {
+ fprintf(stderr, "%s: kvm_commit_irq_routes failed: %s\n", __func__, strerror(-r));
+ return r;
+ }
+ return 0;
+}
+
+static void kvm_msix_del(PCIDevice *dev, unsigned vector)
+{
+ if (dev->msix_entry_used[vector]) {
+ return;
+ }
+ kvm_del_routing_entry(kvm_context, &dev->msix_irq_entries[vector]);
+ kvm_commit_irq_routes(kvm_context);
+}
+#else
+
+static void kvm_msix_free(PCIDevice *dev) {}
+static void kvm_msix_update(PCIDevice *dev, int vector,
+ int was_masked, int is_masked) {}
+static int kvm_msix_add(PCIDevice *dev, unsigned vector) { return -1; }
+static void kvm_msix_del(PCIDevice *dev, unsigned vector) {}
+#endif
+
/* Add MSI-X capability to the config space for the device. */
/* Given a bar and its size, add MSI-X table on top of it
* and fill MSI-X capability in the config space.
@@ -200,7 +313,11 @@ static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
PCIDevice *dev = opaque;
unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
int vector = offset / MSIX_ENTRY_SIZE;
+ int was_masked = msix_is_masked(dev, vector);
pci_set_long(dev->msix_table_page + offset, val);
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_msix_update(dev, vector, was_masked, msix_is_masked(dev, vector));
+ }
msix_handle_mask_update(dev, vector);
}
@@ -259,6 +376,12 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
if (nentries > MSIX_MAX_ENTRIES)
return -EINVAL;
+#ifdef KVM_CAP_IRQCHIP
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ dev->msix_irq_entries = qemu_malloc(nentries *
+ sizeof *dev->msix_irq_entries);
+ }
+#endif
dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES *
sizeof *dev->msix_entry_used);
@@ -295,6 +418,10 @@ static void msix_free_irq_entries(PCIDevice *dev)
{
int vector;
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_msix_free(dev);
+ }
+
for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
dev->msix_entry_used[vector] = 0;
msix_clr_pending(dev, vector);
@@ -315,6 +442,8 @@ int msix_uninit(PCIDevice *dev)
dev->msix_table_page = NULL;
qemu_free(dev->msix_entry_used);
dev->msix_entry_used = NULL;
+ qemu_free(dev->msix_irq_entries);
+ dev->msix_irq_entries = NULL;
dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
return 0;
}
@@ -323,10 +452,13 @@ void msix_save(PCIDevice *dev, QEMUFile *f)
{
unsigned n = dev->msix_entries_nr;
- if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
+ if (!msix_supported) {
return;
}
+ if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
+ return;
+ }
qemu_put_buffer(f, dev->msix_table_page, n * MSIX_ENTRY_SIZE);
qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}
@@ -336,6 +468,9 @@ void msix_load(PCIDevice *dev, QEMUFile *f)
{
unsigned n = dev->msix_entries_nr;
+ if (!msix_supported)
+ return;
+
if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
return;
}
@@ -380,6 +515,13 @@ void msix_notify(PCIDevice *dev, unsigned vector)
return;
}
+#ifdef KVM_CAP_IRQCHIP
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_set_irq(dev->msix_irq_entries[vector].gsi, 1, NULL);
+ return;
+ }
+#endif
+
address = pci_get_long(table_entry + MSIX_MSG_UPPER_ADDR);
address = (address << 32) | pci_get_long(table_entry + MSIX_MSG_ADDR);
data = pci_get_long(table_entry + MSIX_MSG_DATA);
@@ -408,9 +550,19 @@ void msix_reset(PCIDevice *dev)
/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
+ int ret;
if (vector >= dev->msix_entries_nr)
return -EINVAL;
- dev->msix_entry_used[vector]++;
+ if (dev->msix_entry_used[vector]) {
+ return 0;
+ }
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ ret = kvm_msix_add(dev, vector);
+ if (ret) {
+ return ret;
+ }
+ }
+ ++dev->msix_entry_used[vector];
return 0;
}
@@ -423,6 +575,9 @@ void msix_vector_unuse(PCIDevice *dev, unsigned vector)
if (--dev->msix_entry_used[vector]) {
return;
}
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ kvm_msix_del(dev, vector);
+ }
msix_clr_pending(dev, vector);
}
diff --git a/hw/pc.c b/hw/pc.c
index 04ff78d97..632f439cd 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -44,6 +44,9 @@
#include "ide.h"
#include "loader.h"
#include "elf.h"
+#include "device-assignment.h"
+
+#include "qemu-kvm.h"
/* output Bochs bios info messages */
//#define DEBUG_BIOS
@@ -52,6 +55,8 @@
//#define DEBUG_MULTIBOOT
#define BIOS_FILENAME "bios.bin"
+#define EXTBOOT_FILENAME "extboot.bin"
+#define VAPIC_FILENAME "vapic.bin"
#define PC_MAX_BIOS_SIZE (4 * 1024 * 1024)
@@ -69,6 +74,8 @@ static RTCState *rtc_state;
static PITState *pit;
static PCII440FXState *i440fx_state;
+qemu_irq *ioapic_irq_hack;
+
typedef struct isa_irq_state {
qemu_irq *i8259;
qemu_irq *ioapic;
@@ -952,7 +959,7 @@ int cpu_is_bsp(CPUState *env)
return env->cpuid_apic_id == 0;
}
-static CPUState *pc_new_cpu(const char *cpu_model)
+CPUState *pc_new_cpu(const char *cpu_model)
{
CPUState *env;
@@ -961,6 +968,7 @@ static CPUState *pc_new_cpu(const char *cpu_model)
fprintf(stderr, "Unable to find x86 CPU definition\n");
exit(1);
}
+ env->kvm_cpu_state.regs_modified = 1;
if ((env->cpuid_features & CPUID_APIC) || smp_cpus > 1) {
env->cpuid_apic_id = env->cpu_index;
/* APIC reset callback resets cpu */
@@ -968,6 +976,11 @@ static CPUState *pc_new_cpu(const char *cpu_model)
} else {
qemu_register_reset((QEMUResetHandler*)cpu_reset, env);
}
+
+ /* kvm needs this to run after the apic is initialized. Otherwise,
+ * it can access invalid state and crash.
+ */
+ qemu_init_vcpu(env);
return env;
}
@@ -1015,6 +1028,9 @@ static void pc_init1(ram_addr_t ram_size,
#endif
}
+ if (kvm_enabled()) {
+ kvm_set_boot_cpu_id(0);
+ }
for (i = 0; i < smp_cpus; i++) {
env = pc_new_cpu(cpu_model);
}
@@ -1022,18 +1038,11 @@ static void pc_init1(ram_addr_t ram_size,
vmport_init();
/* allocate RAM */
- ram_addr = qemu_ram_alloc(0xa0000);
+ ram_addr = qemu_ram_alloc(below_4g_mem_size);
cpu_register_physical_memory(0, 0xa0000, ram_addr);
-
- /* Allocate, even though we won't register, so we don't break the
- * phys_ram_base + PA assumption. This range includes vga (0xa0000 - 0xc0000),
- * and some bios areas, which will be registered later
- */
- ram_addr = qemu_ram_alloc(0x100000 - 0xa0000);
- ram_addr = qemu_ram_alloc(below_4g_mem_size - 0x100000);
cpu_register_physical_memory(0x100000,
below_4g_mem_size - 0x100000,
- ram_addr);
+ ram_addr + 0x100000);
/* above 4giga memory allocation */
if (above_4g_mem_size > 0) {
@@ -1075,11 +1084,17 @@ static void pc_init1(ram_addr_t ram_size,
isa_bios_size = bios_size;
if (isa_bios_size > (128 * 1024))
isa_bios_size = 128 * 1024;
+ cpu_register_physical_memory(0xd0000, (192 * 1024) - isa_bios_size,
+ IO_MEM_UNASSIGNED);
+ /* kvm tpr optimization needs the bios accessible for write, at least to qemu itself */
cpu_register_physical_memory(0x100000 - isa_bios_size,
isa_bios_size,
- (bios_offset + bios_size - isa_bios_size) | IO_MEM_ROM);
-
+ (bios_offset + bios_size - isa_bios_size) /* | IO_MEM_ROM */);
+ if (extboot_drive) {
+ option_rom[nb_option_roms++] = qemu_strdup(EXTBOOT_FILENAME);
+ }
+ option_rom[nb_option_roms++] = qemu_strdup(VAPIC_FILENAME);
rom_enable_driver_roms = 1;
option_rom_offset = qemu_ram_alloc(PC_ROM_SIZE);
@@ -1101,10 +1116,18 @@ static void pc_init1(ram_addr_t ram_size,
}
cpu_irq = qemu_allocate_irqs(pic_irq_request, NULL, 1);
- i8259 = i8259_init(cpu_irq[0]);
- isa_irq_state = qemu_mallocz(sizeof(*isa_irq_state));
- isa_irq_state->i8259 = i8259;
- isa_irq = qemu_allocate_irqs(isa_irq_handler, isa_irq_state, 24);
+#ifdef KVM_CAP_IRQCHIP
+ if (kvm_enabled() && kvm_irqchip_in_kernel()) {
+ isa_irq_state = qemu_mallocz(sizeof(*isa_irq_state));
+ isa_irq = i8259 = kvm_i8259_init(cpu_irq[0]);
+ } else
+#endif
+ {
+ i8259 = i8259_init(cpu_irq[0]);
+ isa_irq_state = qemu_mallocz(sizeof(*isa_irq_state));
+ isa_irq_state->i8259 = i8259;
+ isa_irq = qemu_allocate_irqs(isa_irq_handler, isa_irq_state, 24);
+ }
if (pci_enabled) {
pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, isa_irq);
@@ -1149,8 +1172,14 @@ static void pc_init1(ram_addr_t ram_size,
if (pci_enabled) {
isa_irq_state->ioapic = ioapic_init();
+ ioapic_irq_hack = isa_irq;
}
- pit = pit_init(0x40, isa_reserve_irq(0));
+#ifdef CONFIG_KVM_PIT
+ if (kvm_enabled() && qemu_kvm_pit_in_kernel())
+ pit = kvm_pit_init(0x40, isa_reserve_irq(0));
+ else
+#endif
+ pit = pit_init(0x40, isa_reserve_irq(0));
pcspk_init(pit);
if (!no_hpet) {
hpet_init(isa_irq);
@@ -1174,7 +1203,7 @@ static void pc_init1(ram_addr_t ram_size,
if (!pci_enabled || (nd->model && strcmp(nd->model, "ne2k_isa") == 0))
pc_init_ne2k_isa(nd);
else
- pci_nic_init_nofail(nd, "e1000", NULL);
+ pci_nic_init_nofail(nd, "rtl8139", NULL);
}
if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) {
@@ -1226,7 +1255,7 @@ static void pc_init1(ram_addr_t ram_size,
qdev_prop_set_ptr(eeprom, "data", eeprom_buf + (i * 256));
qdev_init_nofail(eeprom);
}
- piix4_acpi_system_hot_add_init(pci_bus);
+ piix4_acpi_system_hot_add_init(pci_bus, cpu_model);
}
if (i440fx_state) {
@@ -1243,6 +1272,18 @@ static void pc_init1(ram_addr_t ram_size,
}
}
+ if (extboot_drive) {
+ DriveInfo *info = extboot_drive;
+ int cyls, heads, secs;
+
+ if (info->type != IF_IDE && info->type != IF_VIRTIO) {
+ bdrv_guess_geometry(info->bdrv, &cyls, &heads, &secs);
+ bdrv_set_geometry_hint(info->bdrv, cyls, heads, secs);
+ }
+
+ extboot_init(info->bdrv, 1);
+ }
+
/* Add virtio console devices */
if (pci_enabled) {
for(i = 0; i < MAX_VIRTIO_CONSOLES; i++) {
@@ -1251,6 +1292,12 @@ static void pc_init1(ram_addr_t ram_size,
}
}
}
+
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+ if (kvm_enabled()) {
+ add_assigned_devices(pci_bus, assigned_devices, assigned_devices_index);
+ }
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
}
static void pc_init_pci(ram_addr_t ram_size,
diff --git a/hw/pc.h b/hw/pc.h
index 03ffc9153..b00f31107 100644
--- a/hw/pc.h
+++ b/hw/pc.h
@@ -28,6 +28,7 @@ extern PicState2 *isa_pic;
void pic_set_irq(int irq, int level);
void pic_set_irq_new(void *opaque, int irq, int level);
qemu_irq *i8259_init(qemu_irq parent_irq);
+qemu_irq *kvm_i8259_init(qemu_irq parent_irq);
int pic_read_irq(PicState2 *s);
void pic_update_irq(PicState2 *s);
uint32_t pic_intack_read(PicState2 *s);
@@ -48,6 +49,7 @@ qemu_irq *ioapic_init(void);
void ioapic_set_irq(void *opaque, int vector, int level);
void apic_reset_irq_delivered(void);
int apic_get_irq_delivered(void);
+void apic_set_irq_delivered(void);
/* i8254.c */
@@ -62,8 +64,12 @@ int pit_get_initial_count(PITState *pit, int channel);
int pit_get_mode(PITState *pit, int channel);
int pit_get_out(PITState *pit, int channel, int64_t current_time);
-void hpet_pit_disable(void);
-void hpet_pit_enable(void);
+/* i8254-kvm.c */
+
+PITState *kvm_pit_init(int base, qemu_irq irq);
+
+void hpet_disable_pit(void);
+void hpet_enable_pit(void);
/* vmport.c */
void vmport_init(void);
@@ -93,6 +99,7 @@ extern int fd_bootchk;
void ioport_set_a20(int enable);
int ioport_get_a20(void);
+CPUState *pc_new_cpu(const char *cpu_model);
/* acpi.c */
extern int acpi_enabled;
@@ -106,7 +113,7 @@ int acpi_table_add(const char *table_desc);
i2c_bus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
qemu_irq sci_irq);
void piix4_smbus_register_device(SMBusDevice *dev, uint8_t addr);
-void piix4_acpi_system_hot_add_init(PCIBus *bus);
+void piix4_acpi_system_hot_add_init(PCIBus *bus, const char *model);
/* hpet.c */
extern int no_hpet;
@@ -116,6 +123,9 @@ void pcspk_init(PITState *);
int pcspk_audio_init(qemu_irq *pic);
/* piix_pci.c */
+/* config space register for IRQ routing */
+#define PIIX_CONFIG_IRQ_ROUTE 0x60
+
struct PCII440FXState;
typedef struct PCII440FXState PCII440FXState;
@@ -127,6 +137,10 @@ void i440fx_init_memory_mappings(PCII440FXState *d);
extern PCIDevice *piix4_dev;
int piix4_init(PCIBus *bus, int devfn);
+int piix_get_irq(int pin);
+
+int ipf_map_irq(PCIDevice *pci_dev, int irq_num);
+
/* vga.c */
enum vga_retrace_method {
VGA_RETRACE_DUMB,
@@ -149,5 +163,10 @@ void isa_cirrus_vga_init(void);
void isa_ne2000_init(int base, int irq, NICInfo *nd);
+/* extboot.c */
+
+void extboot_init(BlockDriverState *bs, int cmd);
+
int cpu_is_bsp(CPUState *env);
+
#endif
diff --git a/hw/pci-hotplug.c b/hw/pci-hotplug.c
index ba13d2bce..41ce0041b 100644
--- a/hw/pci-hotplug.c
+++ b/hw/pci-hotplug.c
@@ -34,6 +34,7 @@
#include "virtio-blk.h"
#include "qemu-config.h"
#include "qemu-objects.h"
+#include "device-assignment.h"
#if defined(TARGET_I386)
static PCIDevice *qemu_pci_hot_add_nic(Monitor *mon,
@@ -225,6 +226,24 @@ static PCIDevice *qemu_pci_hot_add_storage(Monitor *mon,
return dev;
}
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+static PCIDevice *qemu_pci_hot_assign_device(Monitor *mon,
+ const char *devaddr,
+ const char *opts_str)
+{
+ QemuOpts *opts;
+ DeviceState *dev;
+
+ opts = add_assigned_device(opts_str);
+ if (opts == NULL) {
+ monitor_printf(mon, "Error adding device; check syntax\n");
+ return NULL;
+ }
+ dev = qdev_device_add(opts);
+ return DO_UPCAST(PCIDevice, qdev, dev);
+}
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
+
void pci_device_hot_add_print(Monitor *mon, const QObject *data)
{
QDict *qdict;
@@ -277,6 +296,10 @@ void pci_device_hot_add(Monitor *mon, const QDict *qdict, QObject **ret_data)
dev = qemu_pci_hot_add_nic(mon, pci_addr, opts);
else if (strcmp(type, "storage") == 0)
dev = qemu_pci_hot_add_storage(mon, pci_addr, opts);
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+ else if (strcmp(type, "host") == 0)
+ dev = qemu_pci_hot_assign_device(mon, pci_addr, opts);
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
else
monitor_printf(mon, "invalid type: %s\n", type);
diff --git a/hw/pci.c b/hw/pci.c
index 8f30f73b7..861d42784 100644
--- a/hw/pci.c
+++ b/hw/pci.c
@@ -27,6 +27,9 @@
#include "net.h"
#include "sysemu.h"
#include "loader.h"
+#include "qemu-kvm.h"
+#include "hw/pc.h"
+#include "device-assignment.h"
//#define DEBUG_PCI
#ifdef DEBUG_PCI
@@ -423,6 +426,7 @@ static int pci_set_default_subsystem_id(PCIDevice *pci_dev)
}
/*
+ * Parse pci address in qemu command
* Parse [[<domain>:]<bus>:]<slot>, return -1 on error
*/
static int pci_parse_devaddr(const char *addr, int *domp, int *busp, unsigned *slotp)
@@ -471,6 +475,55 @@ static int pci_parse_devaddr(const char *addr, int *domp, int *busp, unsigned *s
return 0;
}
+/*
+ * Parse device bdf in device assignment command:
+ *
+ * -pcidevice host=bus:dev.func
+ *
+ * Parse <bus>:<slot>.<func>; return -1 on error
+ */
+int pci_parse_host_devaddr(const char *addr, int *busp,
+ int *slotp, int *funcp)
+{
+ const char *p;
+ char *e;
+ int val;
+ int bus = 0, slot = 0, func = 0;
+
+ p = addr;
+ val = strtoul(p, &e, 16);
+ if (e == p)
+ return -1;
+ if (*e == ':') {
+ bus = val;
+ p = e + 1;
+ val = strtoul(p, &e, 16);
+ if (e == p)
+ return -1;
+ if (*e == '.') {
+ slot = val;
+ p = e + 1;
+ val = strtoul(p, &e, 16);
+ if (e == p)
+ return -1;
+ func = val;
+ } else
+ return -1;
+ } else
+ return -1;
+
+ if (bus > 0xff || slot > 0x1f || func > 0x7)
+ return -1;
+
+ if (*e)
+ return -1;
+
+ *busp = bus;
+ *slotp = slot;
+ *funcp = func;
+ return 0;
+}
+
int pci_read_devaddr(Monitor *mon, const char *addr, int *domp, int *busp,
unsigned *slotp)
{
@@ -947,25 +1000,80 @@ static void pci_update_mappings(PCIDevice *d)
}
}
-uint32_t pci_default_read_config(PCIDevice *d,
- uint32_t address, int len)
+static uint32_t pci_read_config(PCIDevice *d,
+ uint32_t address, int len)
{
uint32_t val = 0;
- assert(len == 1 || len == 2 || len == 4);
+
len = MIN(len, pci_config_size(d) - address);
memcpy(&val, d->config + address, len);
return le32_to_cpu(val);
}
+uint32_t pci_default_read_config(PCIDevice *d,
+ uint32_t address, int len)
+{
+ assert(len == 1 || len == 2 || len == 4);
+
+ if (pci_access_cap_config(d, address, len)) {
+ return d->cap.config_read(d, address, len);
+ }
+
+ return pci_read_config(d, address, len);
+}
+
+static void pci_write_config(PCIDevice *pci_dev,
+ uint32_t address, uint32_t val, int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ pci_dev->config[address + i] = val & 0xff;
+ val >>= 8;
+ }
+}
+
+int pci_access_cap_config(PCIDevice *pci_dev, uint32_t address, int len)
+{
+ if (pci_dev->cap.supported && address >= pci_dev->cap.start &&
+ (address + len) < pci_dev->cap.start + pci_dev->cap.length)
+ return 1;
+ return 0;
+}
+
+uint32_t pci_default_cap_read_config(PCIDevice *pci_dev,
+ uint32_t address, int len)
+{
+ return pci_read_config(pci_dev, address, len);
+}
+
+void pci_default_cap_write_config(PCIDevice *pci_dev,
+ uint32_t address, uint32_t val, int len)
+{
+ pci_write_config(pci_dev, address, val, len);
+}
+
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l)
{
int i;
uint32_t config_size = pci_config_size(d);
+ if (pci_access_cap_config(d, addr, l)) {
+ d->cap.config_write(d, addr, val, l);
+ return;
+ }
+
for (i = 0; i < l && addr + i < config_size; val >>= 8, ++i) {
uint8_t wmask = d->wmask[addr + i];
d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
}
+
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+ if (kvm_enabled() && kvm_irqchip_in_kernel() &&
+ addr >= PIIX_CONFIG_IRQ_ROUTE &&
+ addr < PIIX_CONFIG_IRQ_ROUTE + 4)
+ assigned_dev_update_irqs();
+#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
+
if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
@@ -986,11 +1094,20 @@ static void pci_set_irq(void *opaque, int irq_num, int level)
if (!change)
return;
+#if defined(TARGET_IA64)
+ ioapic_set_irq(pci_dev, irq_num, level);
+#endif
+
pci_set_irq_state(pci_dev, irq_num, level);
pci_update_irq_status(pci_dev);
pci_change_irq_level(pci_dev, irq_num, change);
}
+int pci_map_irq(PCIDevice *pci_dev, int pin)
+{
+ return pci_dev->bus->map_irq(pci_dev, pin);
+}
+
/***********************************************************/
/* monitor info on PCI */
@@ -1417,6 +1534,37 @@ PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
return dev;
}
+int pci_enable_capability_support(PCIDevice *pci_dev,
+ uint32_t config_start,
+ PCICapConfigReadFunc *config_read,
+ PCICapConfigWriteFunc *config_write,
+ PCICapConfigInitFunc *config_init)
+{
+ if (!pci_dev)
+ return -ENODEV;
+
+ pci_dev->config[0x06] |= 0x10; // status = capabilities
+
+ if (config_start == 0)
+ pci_dev->cap.start = PCI_CAPABILITY_CONFIG_DEFAULT_START_ADDR;
+ else if (config_start >= 0x40 && config_start < 0xff)
+ pci_dev->cap.start = config_start;
+ else
+ return -EINVAL;
+
+ if (config_read)
+ pci_dev->cap.config_read = config_read;
+ else
+ pci_dev->cap.config_read = pci_default_cap_read_config;
+ if (config_write)
+ pci_dev->cap.config_write = config_write;
+ else
+ pci_dev->cap.config_write = pci_default_cap_write_config;
+ pci_dev->cap.supported = 1;
+ pci_dev->config[PCI_CAPABILITY_LIST] = pci_dev->cap.start;
+ return config_init(pci_dev);
+}
+
static int pci_find_space(PCIDevice *pdev, uint8_t size)
{
int config_size = pci_config_size(pdev);
diff --git a/hw/pci.h b/hw/pci.h
index dba66ef66..a225a6a2a 100644
--- a/hw/pci.h
+++ b/hw/pci.h
@@ -5,11 +5,16 @@
#include "qdev.h"
+struct kvm_irq_routing_entry;
+
/* PCI includes legacy ISA access. */
#include "isa.h"
-/* PCI bus */
+/* imported from <linux/pci.h> */
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+/* PCI bus */
extern target_phys_addr_t pci_mem_base;
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
@@ -82,6 +87,12 @@ typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num,
pcibus_t addr, pcibus_t size, int type);
typedef int PCIUnregisterFunc(PCIDevice *pci_dev);
+typedef void PCICapConfigWriteFunc(PCIDevice *pci_dev,
+ uint32_t address, uint32_t val, int len);
+typedef uint32_t PCICapConfigReadFunc(PCIDevice *pci_dev,
+ uint32_t address, int len);
+typedef int PCICapConfigInitFunc(PCIDevice *pci_dev);
+
typedef struct PCIIORegion {
pcibus_t addr; /* current PCI mapping address. -1 means not mapped */
#define PCI_BAR_UNMAPPED (~(pcibus_t)0)
@@ -160,10 +171,19 @@ typedef struct PCIIORegion {
/* Bits in the PCI Status Register (PCI 2.3 spec) */
#define PCI_STATUS_RESERVED1 0x007
#define PCI_STATUS_INT_STATUS 0x008
+#ifndef PCI_STATUS_CAP_LIST
#define PCI_STATUS_CAP_LIST 0x010
+#endif
+#ifndef PCI_STATUS_66MHZ
#define PCI_STATUS_66MHZ 0x020
+#endif
+
#define PCI_STATUS_RESERVED2 0x040
+
+#ifndef PCI_STATUS_FAST_BACK
#define PCI_STATUS_FAST_BACK 0x080
+#endif
+
#define PCI_STATUS_DEVSEL 0x600
#define PCI_STATUS_RESERVED_MASK_LO (PCI_STATUS_RESERVED1 | \
@@ -192,6 +212,11 @@ enum {
QEMU_PCI_CAP_EXPRESS = 0x2,
};
+#define PCI_CAPABILITY_CONFIG_MAX_LENGTH 0x60
+#define PCI_CAPABILITY_CONFIG_DEFAULT_START_ADDR 0x40
+#define PCI_CAPABILITY_CONFIG_MSI_LENGTH 0x10
+#define PCI_CAPABILITY_CONFIG_MSIX_LENGTH 0x10
+
struct PCIDevice {
DeviceState qdev;
/* PCI config space */
@@ -247,6 +272,23 @@ struct PCIDevice {
char *romfile;
ram_addr_t rom_offset;
uint32_t rom_bar;
+
+ /* How much space does an MSIX table need. */
+ /* The spec requires giving the table structure
+ * a 4K aligned region all by itself. Align it to
+ * target pages so that drivers can do passthrough
+ * on the rest of the region. */
+ target_phys_addr_t msix_page_size;
+
+ struct kvm_irq_routing_entry *msix_irq_entries;
+
+ /* Device capability configuration space */
+ struct {
+ int supported;
+ unsigned int start, length;
+ PCICapConfigReadFunc *config_read;
+ PCICapConfigWriteFunc *config_write;
+ } cap;
};
PCIDevice *pci_register_device(PCIBus *bus, const char *name,
@@ -258,6 +300,14 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
pcibus_t size, int type,
PCIMapIORegionFunc *map_func);
+int pci_enable_capability_support(PCIDevice *pci_dev,
+ uint32_t config_start,
+ PCICapConfigReadFunc *config_read,
+ PCICapConfigWriteFunc *config_write,
+ PCICapConfigInitFunc *config_init);
+
+int pci_map_irq(PCIDevice *pci_dev, int pin);
+
int pci_add_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size);
void pci_del_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size);
@@ -266,13 +316,17 @@ void pci_reserve_capability(PCIDevice *pci_dev, uint8_t offset, uint8_t size);
uint8_t pci_find_capability(PCIDevice *pci_dev, uint8_t cap_id);
-
uint32_t pci_default_read_config(PCIDevice *d,
uint32_t address, int len);
void pci_default_write_config(PCIDevice *d,
uint32_t address, uint32_t val, int len);
void pci_device_save(PCIDevice *s, QEMUFile *f);
int pci_device_load(PCIDevice *s, QEMUFile *f);
+uint32_t pci_default_cap_read_config(PCIDevice *pci_dev,
+ uint32_t address, int len);
+void pci_default_cap_write_config(PCIDevice *pci_dev,
+ uint32_t address, uint32_t val, int len);
+int pci_access_cap_config(PCIDevice *pci_dev, uint32_t address, int len);
typedef void (*pci_set_irq_fn)(void *opaque, int irq_num, int level);
typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num);
@@ -301,6 +355,9 @@ PCIBus *pci_get_bus_devfn(int *devfnp, const char *devaddr);
int pci_read_devaddr(Monitor *mon, const char *addr, int *domp, int *busp,
unsigned *slotp);
+int pci_parse_host_devaddr(const char *addr, int *busp,
+ int *slotp, int *funcp);
+
void pci_info(Monitor *mon);
PCIBus *pci_bridge_init(PCIBus *bus, int devfn, uint16_t vid, uint16_t did,
pci_map_irq_fn map_irq, const char *name);
diff --git a/hw/pcspk.c b/hw/pcspk.c
index 26a0ecb9d..128836ba4 100644
--- a/hw/pcspk.c
+++ b/hw/pcspk.c
@@ -27,6 +27,8 @@
#include "isa.h"
#include "audio/audio.h"
#include "qemu-timer.h"
+#include "i8254.h"
+#include "qemu-kvm.h"
#define PCSPK_BUF_LEN 1792
#define PCSPK_SAMPLE_RATE 32000
@@ -48,6 +50,43 @@ typedef struct {
static const char *s_spk = "pcspk";
static PCSpkState pcspk_state;
+#ifdef CONFIG_KVM_PIT
+static void kvm_get_pit_ch2(PITState *pit,
+ struct kvm_pit_state *inkernel_state)
+{
+ struct kvm_pit_state pit_state;
+
+ if (kvm_enabled() && qemu_kvm_pit_in_kernel()) {
+ kvm_get_pit(kvm_context, &pit_state);
+ pit->channels[2].mode = pit_state.channels[2].mode;
+ pit->channels[2].count = pit_state.channels[2].count;
+ pit->channels[2].count_load_time = pit_state.channels[2].count_load_time;
+ pit->channels[2].gate = pit_state.channels[2].gate;
+ if (inkernel_state) {
+ memcpy(inkernel_state, &pit_state, sizeof(*inkernel_state));
+ }
+ }
+}
+
+static void kvm_set_pit_ch2(PITState *pit,
+ struct kvm_pit_state *inkernel_state)
+{
+ if (kvm_enabled() && qemu_kvm_pit_in_kernel()) {
+ inkernel_state->channels[2].mode = pit->channels[2].mode;
+ inkernel_state->channels[2].count = pit->channels[2].count;
+ inkernel_state->channels[2].count_load_time =
+ pit->channels[2].count_load_time;
+ inkernel_state->channels[2].gate = pit->channels[2].gate;
+ kvm_set_pit(kvm_context, inkernel_state);
+ }
+}
+#else
+static inline void kvm_get_pit_ch2(PITState *pit,
+ struct kvm_pit_state *inkernel_state) { }
+static inline void kvm_set_pit_ch2(PITState *pit,
+ struct kvm_pit_state *inkernel_state) { }
+#endif
+
static inline void generate_samples(PCSpkState *s)
{
unsigned int i;
@@ -72,6 +111,8 @@ static void pcspk_callback(void *opaque, int free)
PCSpkState *s = opaque;
unsigned int n;
+ kvm_get_pit_ch2(s->pit, NULL);
+
if (pit_get_mode(s->pit, 2) != 3)
return;
@@ -117,6 +158,8 @@ static uint32_t pcspk_ioport_read(void *opaque, uint32_t addr)
PCSpkState *s = opaque;
int out;
+ kvm_get_pit_ch2(s->pit, NULL);
+
s->dummy_refresh_clock ^= (1 << 4);
out = pit_get_out(s->pit, 2, qemu_get_clock(vm_clock)) << 5;
@@ -125,9 +168,12 @@ static uint32_t pcspk_ioport_read(void *opaque, uint32_t addr)
static void pcspk_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
+ struct kvm_pit_state inkernel_state;
PCSpkState *s = opaque;
const int gate = val & 1;
+ kvm_get_pit_ch2(s->pit, &inkernel_state);
+
s->data_on = (val >> 1) & 1;
pit_set_gate(s->pit, 2, gate);
if (s->voice) {
@@ -135,6 +181,8 @@ static void pcspk_ioport_write(void *opaque, uint32_t addr, uint32_t val)
s->play_pos = 0;
AUD_set_active_out(s->voice, gate & s->data_on);
}
+
+ kvm_set_pit_ch2(s->pit, &inkernel_state);
}
void pcspk_init(PITState *pit)
diff --git a/hw/piix_pci.c b/hw/piix_pci.c
index 1b67475e0..001bc93cd 100644
--- a/hw/piix_pci.c
+++ b/hw/piix_pci.c
@@ -29,6 +29,8 @@
#include "isa.h"
#include "sysbus.h"
+#include "qemu-kvm.h"
+
typedef PCIHostState I440FXState;
typedef struct PIIX3State {
@@ -88,6 +90,10 @@ static void i440fx_update_memory_mappings(PCII440FXState *d)
int i, r;
uint32_t smram, addr;
+ if (kvm_enabled()) {
+ /* FIXME: Support remappings and protection changes. */
+ return;
+ }
update_pam(d, 0xf0000, 0x100000, (d->dev.config[0x59] >> 4) & 3);
for(i = 0; i < 12; i++) {
r = (d->dev.config[(i >> 1) + 0x5a] >> ((i & 1) * 4)) & 3;
@@ -201,6 +207,8 @@ static int i440fx_initfn(PCIDevice *dev)
return 0;
}
+static PIIX3State *piix3_dev;
+
PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq *pic)
{
DeviceState *dev;
@@ -226,6 +234,8 @@ PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, qemu_irq *
*piix3_devfn = piix3->dev.devfn;
+ piix3_dev = piix3;
+
return b;
}
@@ -253,6 +263,13 @@ static void piix3_set_irq(void *opaque, int irq_num, int level)
}
}
+int piix_get_irq(int pin)
+{
+ if (piix3_dev)
+ return piix3_dev->dev.config[0x60+pin];
+ return 0;
+}
+
static void piix3_reset(void *opaque)
{
PIIX3State *d = opaque;
diff --git a/hw/ppc440.c b/hw/ppc440.c
index abe0a560d..ef883735b 100644
--- a/hw/ppc440.c
+++ b/hw/ppc440.c
@@ -20,6 +20,7 @@
#include "ppc405.h"
#include "sysemu.h"
#include "kvm.h"
+#include "qemu-kvm.h"
#define PPC440EP_PCI_CONFIG 0xeec00000
#define PPC440EP_PCI_INTACK 0xeed00000
diff --git a/hw/ppc440_bamboo.c b/hw/ppc440_bamboo.c
index a4882406a..25417e397 100644
--- a/hw/ppc440_bamboo.c
+++ b/hw/ppc440_bamboo.c
@@ -24,6 +24,7 @@
#include "device_tree.h"
#include "loader.h"
#include "elf.h"
+#include "qemu-kvm.h"
#define BINARY_DEVICE_TREE_FILE "bamboo.dtb"
diff --git a/hw/ppce500_mpc8544ds.c b/hw/ppce500_mpc8544ds.c
index ea30816b3..45356ca02 100644
--- a/hw/ppce500_mpc8544ds.c
+++ b/hw/ppce500_mpc8544ds.c
@@ -31,6 +31,7 @@
#include "ppce500.h"
#include "loader.h"
#include "elf.h"
+#include "qemu-kvm.h"
#define BINARY_DEVICE_TREE_FILE "mpc8544ds.dtb"
#define UIMAGE_LOAD_BASE 0
diff --git a/hw/testdev.c b/hw/testdev.c
new file mode 100644
index 000000000..ac5b9cd1d
--- /dev/null
+++ b/hw/testdev.c
@@ -0,0 +1,63 @@
+#include "hw.h"
+#include "qdev.h"
+#include "isa.h"
+
+struct testdev {
+ ISADevice dev;
+ CharDriverState *chr;
+};
+
+static void test_device_serial_write(void *opaque, uint32_t addr, uint32_t data)
+{
+ struct testdev *dev = opaque;
+ uint8_t buf[1] = { data };
+
+ if (dev->chr) {
+ qemu_chr_write(dev->chr, buf, 1);
+ }
+}
+
+static void test_device_exit(void *opaque, uint32_t addr, uint32_t data)
+{
+ exit(data);
+}
+
+static uint32_t test_device_memsize_read(void *opaque, uint32_t addr)
+{
+ return ram_size;
+}
+
+static void test_device_irq_line(void *opaque, uint32_t addr, uint32_t data)
+{
+ extern qemu_irq *ioapic_irq_hack;
+
+ qemu_set_irq(ioapic_irq_hack[addr - 0x2000], !!data);
+}
+
+static int init_test_device(ISADevice *isa)
+{
+ struct testdev *dev = DO_UPCAST(struct testdev, dev, isa);
+
+ register_ioport_write(0xf1, 1, 1, test_device_serial_write, dev);
+ register_ioport_write(0xf4, 1, 4, test_device_exit, dev);
+ register_ioport_read(0xd1, 1, 4, test_device_memsize_read, dev);
+ register_ioport_write(0x2000, 24, 1, test_device_irq_line, NULL);
+ return 0;
+}
+
+static ISADeviceInfo testdev_info = {
+ .qdev.name = "testdev",
+ .qdev.size = sizeof(struct testdev),
+ .init = init_test_device,
+ .qdev.props = (Property[]) {
+ DEFINE_PROP_CHR("chardev", struct testdev, chr),
+ DEFINE_PROP_END_OF_LIST(),
+ },
+};
+
+static void testdev_register_devices(void)
+{
+ isa_qdev_register(&testdev_info);
+}
+
+device_init(testdev_register_devices)
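hw/testdev.c gives guest-side unit tests a handful of magic ISA ports: a byte written to 0xf1 is forwarded to the attached chardev, a write to 0xf4 makes QEMU exit() with that value, a read from 0xd1 returns ram_size, and writes to 0x2000+N drive IOAPIC input line N through the ioapic_irq_hack array. A minimal sketch of how a bare-metal x86 test guest might use the first two ports; the outb helper is the usual inline-asm idiom and is not part of this patch:

    #include <stdint.h>

    /* Guest-side port output; standard x86 idiom, not provided by testdev. */
    static inline void outb(uint16_t port, uint8_t val)
    {
        asm volatile("outb %b0, %w1" : : "a"(val), "Nd"(port));
    }

    static void test_puts(const char *s)
    {
        while (*s)
            outb(0xf1, *s++);   /* forwarded to the "testdev" chardev */
    }

    static void test_exit(uint8_t code)
    {
        outb(0xf4, code);       /* host-side QEMU calls exit(code)    */
    }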
diff --git a/hw/vga-pci.c b/hw/vga-pci.c
index eef78ed08..9089c9f5d 100644
--- a/hw/vga-pci.c
+++ b/hw/vga-pci.c
@@ -68,9 +68,11 @@ static void pci_vga_write_config(PCIDevice *d,
PCIVGAState *pvs = container_of(d, PCIVGAState, dev);
VGACommonState *s = &pvs->vga;
+ vga_dirty_log_stop(s);
pci_default_write_config(d, address, val, len);
if (s->map_addr && pvs->dev.io_regions[0].addr == -1)
s->map_addr = 0;
+ vga_dirty_log_start(s);
}
static int pci_vga_initfn(PCIDevice *dev)
diff --git a/hw/vga.c b/hw/vga.c
index d05f1f9d1..2ad2dcd4f 100644
--- a/hw/vga.c
+++ b/hw/vga.c
@@ -1277,6 +1277,8 @@ static void vga_draw_text(VGACommonState *s, int full_update)
vga_draw_glyph8_func *vga_draw_glyph8;
vga_draw_glyph9_func *vga_draw_glyph9;
+ vga_dirty_log_stop(s);
+
/* compute font data address (in plane 2) */
v = s->sr[3];
offset = (((v >> 4) & 1) | ((v << 1) & 6)) * 8192 * 4 + 2;
@@ -1589,40 +1591,65 @@ static void vga_sync_dirty_bitmap(VGACommonState *s)
}
#endif
+ vga_dirty_log_start(s);
+}
+
+static int s1, s2, s3;
+
+static void mark_dirty(target_phys_addr_t start, target_phys_addr_t len)
+{
+ target_phys_addr_t end = start + len;
+
+ while (start < end) {
+ cpu_physical_memory_set_dirty(cpu_get_physical_page_desc(start));
+ start += TARGET_PAGE_SIZE;
+ }
}
void vga_dirty_log_start(VGACommonState *s)
{
if (kvm_enabled() && s->map_addr)
- kvm_log_start(s->map_addr, s->map_end - s->map_addr);
-
+ if (!s1) {
+ kvm_log_start(s->map_addr, s->map_end - s->map_addr);
+ mark_dirty(s->map_addr, s->map_end - s->map_addr);
+ s1 = 1;
+ }
if (kvm_enabled() && s->lfb_vram_mapped) {
- kvm_log_start(isa_mem_base + 0xa0000, 0x8000);
- kvm_log_start(isa_mem_base + 0xa8000, 0x8000);
+ if (!s2) {
+ kvm_log_start(isa_mem_base + 0xa0000, 0x8000);
+ kvm_log_start(isa_mem_base + 0xa8000, 0x8000);
+ mark_dirty(isa_mem_base + 0xa0000, 0x10000);
+ }
+ s2 = 1;
}
#ifdef CONFIG_BOCHS_VBE
if (kvm_enabled() && s->vbe_mapped) {
- kvm_log_start(VBE_DISPI_LFB_PHYSICAL_ADDRESS, s->vram_size);
+ if (!s3) {
+ kvm_log_start(VBE_DISPI_LFB_PHYSICAL_ADDRESS, s->vram_size);
+ }
+ s3 = 1;
}
#endif
}
void vga_dirty_log_stop(VGACommonState *s)
{
- if (kvm_enabled() && s->map_addr)
+ if (kvm_enabled() && s->map_addr && s1)
kvm_log_stop(s->map_addr, s->map_end - s->map_addr);
- if (kvm_enabled() && s->lfb_vram_mapped) {
+ if (kvm_enabled() && s->lfb_vram_mapped && s2) {
kvm_log_stop(isa_mem_base + 0xa0000, 0x80000);
kvm_log_stop(isa_mem_base + 0xa8000, 0x80000);
}
#ifdef CONFIG_BOCHS_VBE
- if (kvm_enabled() && s->vbe_mapped) {
+ if (kvm_enabled() && s->vbe_mapped && s3) {
kvm_log_stop(VBE_DISPI_LFB_PHYSICAL_ADDRESS, s->vram_size);
}
#endif
+
+ s1 = s2 = s3 = 0;
}
void vga_dirty_log_restart(VGACommonState *s)
@@ -1860,6 +1887,7 @@ static void vga_draw_blank(VGACommonState *s, int full_update)
return;
if (s->last_scr_width <= 0 || s->last_scr_height <= 0)
return;
+ vga_dirty_log_stop(s);
s->rgb_to_pixel =
rgb_to_pixel_dup_table[get_depth_index(s->ds)];
@@ -1904,6 +1932,9 @@ static void vga_update_display(void *opaque)
vga_draw_text(s, full_update);
break;
case GMODE_GRAPH:
+#ifdef TARGET_IA64
+ full_update = 1;
+#endif
vga_draw_graphic(s, full_update);
break;
case GMODE_BLANK:
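The vga.c changes turn dirty logging into a guarded start/stop pair: the static flags s1, s2 and s3 remember which of the three regions (the PCI LFB mapping, the legacy 0xa0000 window, the Bochs VBE LFB) currently have KVM dirty logging enabled, so vga_dirty_log_stop() only undoes what vga_dirty_log_start() actually did, and a new start re-marks the pages dirty to force a full redraw. pci_vga_write_config() in the previous file shows the intended pairing; a hedged sketch of the same pattern around an arbitrary remapping (vga_change_mapping is a hypothetical caller):

    /* Hypothetical caller: any path that remaps or reprotects VGA memory
     * brackets the change so logging is re-armed on the new ranges. */
    static void vga_change_mapping(VGACommonState *s)
    {
        vga_dirty_log_stop(s);    /* drop logging on the old ranges         */
        /* ... update s->map_addr / s->lfb_vram_mapped / s->vbe_mapped ... */
        vga_dirty_log_start(s);   /* re-enable logging, mark pages dirty    */
    }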
diff --git a/hw/vga_int.h b/hw/vga_int.h
index 23a42efce..9e52e709c 100644
--- a/hw/vga_int.h
+++ b/hw/vga_int.h
@@ -33,8 +33,8 @@
/* bochs VBE support */
#define CONFIG_BOCHS_VBE
-#define VBE_DISPI_MAX_XRES 1600
-#define VBE_DISPI_MAX_YRES 1200
+#define VBE_DISPI_MAX_XRES 2560
+#define VBE_DISPI_MAX_YRES 1600
#define VBE_DISPI_MAX_BPP 32
#define VBE_DISPI_INDEX_ID 0x0
@@ -224,7 +224,7 @@ void vga_init_vbe(VGACommonState *s);
extern const uint8_t sr_mask[8];
extern const uint8_t gr_mask[16];
-#define VGA_RAM_SIZE (8192 * 1024)
+#define VGA_RAM_SIZE (16 * 1024 * 1024)
#define VGABIOS_FILENAME "vgabios.bin"
#define VGABIOS_CIRRUS_FILENAME "vgabios-cirrus.bin"
diff --git a/hw/virtio-balloon.c b/hw/virtio-balloon.c
index cfd3b413f..7ca783e49 100644
--- a/hw/virtio-balloon.c
+++ b/hw/virtio-balloon.c
@@ -19,6 +19,7 @@
#include "balloon.h"
#include "virtio-balloon.h"
#include "kvm.h"
+#include "qemu-kvm.h"
#if defined(__linux__)
#include <sys/mman.h>
diff --git a/hw/virtio-console.c b/hw/virtio-console.c
index 57f8f89af..92c953c35 100644
--- a/hw/virtio-console.c
+++ b/hw/virtio-console.c
@@ -129,6 +129,9 @@ VirtIODevice *virtio_console_init(DeviceState *dev)
s = (VirtIOConsole *)virtio_common_init("virtio-console",
VIRTIO_ID_CONSOLE,
0, sizeof(VirtIOConsole));
+ if (s == NULL)
+ return NULL;
+
s->vdev.get_features = virtio_console_get_features;
s->ivq = virtio_add_queue(&s->vdev, 128, virtio_console_handle_input);
diff --git a/hw/vmport.c b/hw/vmport.c
index 884af3fd9..648861b4d 100644
--- a/hw/vmport.c
+++ b/hw/vmport.c
@@ -21,10 +21,12 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
#include "hw.h"
#include "isa.h"
#include "pc.h"
#include "sysemu.h"
+#include "qemu-kvm.h"
//#define VMPORT_DEBUG
@@ -57,6 +59,10 @@ static uint32_t vmport_ioport_read(void *opaque, uint32_t addr)
CPUState *env = cpu_single_env;
unsigned char command;
uint32_t eax;
+ uint32_t ret;
+
+ if (kvm_enabled())
+ kvm_save_registers(env);
eax = env->regs[R_EAX];
if (eax != VMPORT_MAGIC)
@@ -73,7 +79,12 @@ static uint32_t vmport_ioport_read(void *opaque, uint32_t addr)
return eax;
}
- return s->func[command](s->opaque[command], addr);
+ ret = s->func[command](s->opaque[command], addr);
+
+ if (kvm_enabled())
+ kvm_load_registers(env);
+
+ return ret;
}
static void vmport_ioport_write(void *opaque, uint32_t addr, uint32_t val)
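Under KVM the guest's general-purpose registers live in the kernel between exits, so vmport_ioport_read() now syncs them into env (kvm_save_registers) before reading R_EAX and pushes any changes back (kvm_load_registers) before returning, since the backdoor handlers may rewrite registers. For context, this is the guest-visible handshake the handler decodes; the port number 0x5658 and the magic 0x564D5868 are the conventional values and do not appear in this hunk, so treat them as assumptions:

    #include <stdint.h>

    /* Guest-side sketch of one backdoor call (assumed constants, see above). */
    static inline uint32_t vmport_call(uint32_t command)
    {
        uint32_t eax = 0x564D5868;   /* magic the handler checks in EAX */
        uint16_t port = 0x5658;      /* backdoor I/O port               */

        asm volatile("inl %%dx, %%eax"
                     : "+a"(eax)
                     : "c"(command), "d"(port)
                     : "memory");
        return eax;                  /* result comes back in EAX        */
    }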
diff --git a/ia64.ld b/ia64.ld
index 0c377967c..081aaf1f9 100644
--- a/ia64.ld
+++ b/ia64.ld
@@ -7,7 +7,7 @@ ENTRY(_start)
SECTIONS
{
/* Read-only sections, merged into text segment: */
- PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS;
+ PROVIDE (__executable_start = 0x4000000060000000); . = 0x4000000060000000 + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }
.dynsym : { *(.dynsym) }
diff --git a/ia64intrin.h b/ia64intrin.h
new file mode 100644
index 000000000..ddd5ed980
--- /dev/null
+++ b/ia64intrin.h
@@ -0,0 +1,150 @@
+#ifndef IA64_INTRINSIC_H
+#define IA64_INTRINSIC_H
+
+/*
+ * Compiler-dependent Intrinsics
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ *
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer (void);
+extern void ia64_bad_param_for_getreg (void);
+#define ia64_cmpxchg(sem,ptr,o,n,s) ({ \
+ uint64_t _o, _r; \
+ switch(s) { \
+ case 1: _o = (uint8_t)(long)(o); break; \
+ case 2: _o = (uint16_t)(long)(o); break; \
+ case 4: _o = (uint32_t)(long)(o); break; \
+ case 8: _o = (uint64_t)(long)(o); break; \
+ default: break; \
+ } \
+ switch(s) { \
+ case 1: \
+ _r = ia64_cmpxchg1_##sem((uint8_t*)ptr,n,_o); break; \
+ case 2: \
+ _r = ia64_cmpxchg2_##sem((uint16_t*)ptr,n,_o); break; \
+ case 4: \
+ _r = ia64_cmpxchg4_##sem((uint32_t*)ptr,n,_o); break; \
+ case 8: \
+ _r = ia64_cmpxchg8_##sem((uint64_t*)ptr,n,_o); break; \
+ default: \
+ _r = ia64_cmpxchg_called_with_bad_pointer(); break; \
+ } \
+ (__typeof__(o)) _r; \
+})
+
+#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq,ptr,o,n,sizeof(*ptr))
+#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel,ptr,o,n,sizeof(*ptr))
+
+#ifdef __INTEL_COMPILER
+void __fc(uint64_t *addr);
+void __synci(void);
+void __isrlz(void);
+void __dsrlz(void);
+uint64_t __getReg(const int whichReg);
+uint64_t _InterlockedCompareExchange8_rel(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange8_acq(volatile uint8_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange16_rel(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange16_acq(volatile uint16_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange_rel(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange_acq(volatile uint32_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange64_rel(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
+uint64_t _InterlockedCompareExchange64_acq(volatile uint64_t *dest, uint64_t xchg, uint64_t comp);
+
+#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
+#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
+#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
+#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
+#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
+#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
+#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
+#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
+
+#define ia64_srlz_d __dsrlz
+#define ia64_srlz_i __isrlz
+#define __ia64_fc __fc
+#define ia64_sync_i __synci
+#define __ia64_getreg __getReg
+#else /* __INTEL_COMPILER */
+#define ia64_cmpxchg1_acq(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old) \
+({ \
+ uint64_t ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
+#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+
+#endif /* __INTEL_COMPILER */
+#endif /* IA64_INTRINSIC_H */
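ia64intrin.h gives the ia64 build its compare-and-exchange primitives: ia64_cmpxchg() normalizes the expected value to the operand width and dispatches to the size- and ordering-specific cmpxchgN.acq/.rel instruction (or to the Intel compiler intrinsics). A hedged usage sketch of the convenience macro, a retry loop that atomically increments a shared counter; it is only meaningful when compiled for ia64:

    #include <stdint.h>
    #include "ia64intrin.h"

    /* cmpxchg_acq() returns the value that was actually found in memory,
     * so retry until no other writer raced with us. */
    static void atomic_inc(volatile uint32_t *counter)
    {
        uint32_t old;

        do {
            old = *counter;
        } while (cmpxchg_acq(counter, old, old + 1) != old);
    }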
diff --git a/kvm-all.c b/kvm-all.c
index 15ec38e63..0423fff74 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -26,6 +26,7 @@
#include "gdbstub.h"
#include "kvm.h"
+#ifdef KVM_UPSTREAM
/* KVM uses PAGE_SIZE in it's definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
@@ -57,7 +58,6 @@ struct KVMState
KVMSlot slots[32];
int fd;
int vmfd;
- int regs_modified;
int coalesced_mmio;
int broken_set_mem_region;
int migration_log;
@@ -157,12 +157,14 @@ static void kvm_reset_vcpu(void *opaque)
abort();
}
}
+#endif
int kvm_irqchip_in_kernel(void)
{
return kvm_state->irqchip_in_kernel;
}
+#ifdef KVM_UPSTREAM
int kvm_pit_in_kernel(void)
{
return kvm_state->pit_in_kernel;
@@ -343,6 +345,7 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
return ret;
}
+#endif
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
@@ -393,6 +396,7 @@ int kvm_check_extension(KVMState *s, unsigned int extension)
return ret;
}
+#ifdef KVM_UPSTREAM
int kvm_init(int smp_cpus)
{
@@ -504,6 +508,7 @@ err:
return ret;
}
+#endif
static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
uint32_t count)
@@ -544,6 +549,7 @@ static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
return 1;
}
+#ifdef KVM_UPSTREAM
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
@@ -812,6 +818,7 @@ void kvm_set_phys_mem(target_phys_addr_t start_addr,
}
}
+#endif
int kvm_ioctl(KVMState *s, int type, ...)
{
int ret;
@@ -879,6 +886,7 @@ int kvm_has_vcpu_events(void)
return kvm_state->vcpu_events;
}
+#ifdef KVM_UPSTREAM
void kvm_setup_guest_memory(void *start, size_t size)
{
if (!kvm_has_sync_mmu()) {
@@ -897,7 +905,11 @@ void kvm_setup_guest_memory(void *start, size_t size)
}
}
+#endif /* KVM_UPSTREAM */
+
#ifdef KVM_CAP_SET_GUEST_DEBUG
+
+#ifdef KVM_UPSTREAM
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
#ifdef CONFIG_IOTHREAD
@@ -910,6 +922,7 @@ static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
func(data);
#endif
}
+#endif /* KVM_UPSTREAM */
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
target_ulong pc)
@@ -928,6 +941,8 @@ int kvm_sw_breakpoints_active(CPUState *env)
return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
+#ifdef KVM_UPSTREAM
+
struct kvm_set_guest_debug_data {
struct kvm_guest_debug dbg;
CPUState *env;
@@ -961,6 +976,7 @@ int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
on_vcpu(env, kvm_invoke_set_guest_debug, &data);
return data.err;
}
+#endif
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
target_ulong len, int type)
@@ -1085,3 +1101,5 @@ void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
+
+#include "qemu-kvm.c"
diff --git a/kvm-tpr-opt.c b/kvm-tpr-opt.c
new file mode 100644
index 000000000..bf9c9a06e
--- /dev/null
+++ b/kvm-tpr-opt.c
@@ -0,0 +1,410 @@
+/*
+ * tpr optimization for qemu/kvm
+ *
+ * Copyright (C) 2007-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+
+#include "config.h"
+#include "config-host.h"
+
+#include <string.h>
+
+#include "hw/hw.h"
+#include "hw/isa.h"
+#include "sysemu.h"
+#include "qemu-kvm.h"
+#include "cpu.h"
+
+#include <stdio.h>
+
+static uint64_t map_addr(struct kvm_sregs *sregs, target_ulong virt, unsigned *perms)
+{
+ uint64_t mask = ((1ull << 48) - 1) & ~4095ull;
+ uint64_t p, pp = 7;
+
+ p = sregs->cr3;
+ if (sregs->cr4 & 0x20) {
+ p &= ~31ull;
+ p = ldq_phys(p + 8 * (virt >> 30));
+ if (!(p & 1))
+ return -1ull;
+ p &= mask;
+ p = ldq_phys(p + 8 * ((virt >> 21) & 511));
+ if (!(p & 1))
+ return -1ull;
+ pp &= p;
+ if (p & 128) {
+ p += ((virt >> 12) & 511) << 12;
+ } else {
+ p &= mask;
+ p = ldq_phys(p + 8 * ((virt >> 12) & 511));
+ if (!(p & 1))
+ return -1ull;
+ pp &= p;
+ }
+ } else {
+ p &= mask;
+ p = ldl_phys(p + 4 * ((virt >> 22) & 1023));
+ if (!(p & 1))
+ return -1ull;
+ pp &= p;
+ if (p & 128) {
+ p += ((virt >> 12) & 1023) << 12;
+ } else {
+ p &= mask;
+ p = ldl_phys(p + 4 * ((virt >> 12) & 1023));
+ pp &= p;
+ if (!(p & 1))
+ return -1ull;
+ }
+ }
+ if (perms)
+ *perms = pp >> 1;
+ p &= mask;
+ return p + (virt & 4095);
+}
+
+static uint8_t read_byte_virt(CPUState *env, target_ulong virt)
+{
+ struct kvm_sregs sregs;
+
+ kvm_get_sregs(env, &sregs);
+ return ldub_phys(map_addr(&sregs, virt, NULL));
+}
+
+static void write_byte_virt(CPUState *env, target_ulong virt, uint8_t b)
+{
+ struct kvm_sregs sregs;
+
+ kvm_get_sregs(env, &sregs);
+ stb_phys(map_addr(&sregs, virt, NULL), b);
+}
+
+static __u64 kvm_rsp_read(CPUState *env)
+{
+ struct kvm_regs regs;
+
+ kvm_get_regs(env, &regs);
+ return regs.rsp;
+}
+
+struct vapic_bios {
+ char signature[8];
+ uint32_t virt_base;
+ uint32_t fixup_start;
+ uint32_t fixup_end;
+ uint32_t vapic;
+ uint32_t vapic_size;
+ uint32_t vcpu_shift;
+ uint32_t real_tpr;
+ struct vapic_patches {
+ uint32_t set_tpr;
+ uint32_t set_tpr_eax;
+ uint32_t get_tpr[8];
+ uint32_t get_tpr_stack;
+ } __attribute__((packed)) up, mp;
+} __attribute__((packed));
+
+static struct vapic_bios vapic_bios;
+
+static uint32_t real_tpr;
+static uint32_t bios_addr;
+static uint32_t vapic_phys;
+static uint32_t bios_enabled;
+static uint32_t vbios_desc_phys;
+static uint32_t vapic_bios_addr;
+
+static void update_vbios_real_tpr(void)
+{
+ cpu_physical_memory_rw(vbios_desc_phys, (void *)&vapic_bios, sizeof vapic_bios, 0);
+ vapic_bios.real_tpr = real_tpr;
+ vapic_bios.vcpu_shift = 7;
+ cpu_physical_memory_rw(vbios_desc_phys, (void *)&vapic_bios, sizeof vapic_bios, 1);
+}
+
+static unsigned modrm_reg(uint8_t modrm)
+{
+ return (modrm >> 3) & 7;
+}
+
+static int is_abs_modrm(uint8_t modrm)
+{
+ return (modrm & 0xc7) == 0x05;
+}
+
+static int instruction_is_ok(CPUState *env, uint64_t rip, int is_write)
+{
+ uint8_t b1, b2;
+ unsigned addr_offset;
+ uint32_t addr;
+ uint64_t p;
+
+ if ((rip & 0xf0000000) != 0x80000000 && (rip & 0xf0000000) != 0xe0000000)
+ return 0;
+ if (kvm_rsp_read(env) == 0)
+ return 0;
+ b1 = read_byte_virt(env, rip);
+ b2 = read_byte_virt(env, rip + 1);
+ switch (b1) {
+ case 0xc7: /* mov imm32, r/m32 (c7/0) */
+ if (modrm_reg(b2) != 0)
+ return 0;
+ /* fall through */
+ case 0x89: /* mov r32 to r/m32 */
+ case 0x8b: /* mov r/m32 to r32 */
+ if (!is_abs_modrm(b2))
+ return 0;
+ addr_offset = 2;
+ break;
+ case 0xa1: /* mov abs to eax */
+ case 0xa3: /* mov eax to abs */
+ addr_offset = 1;
+ break;
+ case 0xff: /* push r/m32 */
+ if (modrm_reg(b2) != 6 || !is_abs_modrm(b2))
+ return 0;
+ addr_offset = 2;
+ break;
+ default:
+ return 0;
+ }
+ p = rip + addr_offset;
+ addr = read_byte_virt(env, p++);
+ addr |= read_byte_virt(env, p++) << 8;
+ addr |= read_byte_virt(env, p++) << 16;
+ addr |= read_byte_virt(env, p++) << 24;
+ if ((addr & 0xfff) != 0x80)
+ return 0;
+ real_tpr = addr;
+ update_vbios_real_tpr();
+ return 1;
+}
+
+static int bios_is_mapped(CPUState *env, uint64_t rip)
+{
+ uint32_t probe;
+ uint64_t phys;
+ struct kvm_sregs sregs;
+ unsigned perms;
+ uint32_t i;
+ uint32_t offset, fixup, start = vapic_bios_addr ? : 0xe0000;
+
+ if (bios_enabled)
+ return 1;
+
+ kvm_get_sregs(env, &sregs);
+
+ probe = (rip & 0xf0000000) + start;
+ phys = map_addr(&sregs, probe, &perms);
+ if (phys != start)
+ return 0;
+ bios_addr = probe;
+ for (i = 0; i < 64; ++i) {
+ cpu_physical_memory_read(phys, (void *)&vapic_bios, sizeof(vapic_bios));
+ if (memcmp(vapic_bios.signature, "kvm aPiC", 8) == 0)
+ break;
+ phys += 1024;
+ bios_addr += 1024;
+ }
+ if (i == 64)
+ return 0;
+ if (bios_addr == vapic_bios.virt_base)
+ return 1;
+ vbios_desc_phys = phys;
+ for (i = vapic_bios.fixup_start; i < vapic_bios.fixup_end; i += 4) {
+ offset = ldl_phys(phys + i - vapic_bios.virt_base);
+ fixup = phys + offset;
+ stl_phys(fixup, ldl_phys(fixup) + bios_addr - vapic_bios.virt_base);
+ }
+ vapic_phys = vapic_bios.vapic - vapic_bios.virt_base + phys;
+ return 1;
+}
+
+static int get_pcr_cpu(CPUState *env)
+{
+ uint8_t b;
+
+ cpu_synchronize_state(env);
+
+ if (cpu_memory_rw_debug(env, env->segs[R_FS].base + 0x51, &b, 1, 0) < 0)
+ return -1;
+
+ return (int)b;
+}
+
+int kvm_tpr_enable_vapic(CPUState *env)
+{
+ static uint8_t one = 1;
+ int pcr_cpu = get_pcr_cpu(env);
+
+ if (pcr_cpu < 0)
+ return 0;
+
+ kvm_enable_vapic(env, vapic_phys + (pcr_cpu << 7));
+ cpu_physical_memory_rw(vapic_phys + (pcr_cpu << 7) + 4, &one, 1, 1);
+ env->update_vapic = 0;
+ bios_enabled = 1;
+ return 1;
+}
+
+static int enable_vapic(CPUState *env)
+{
+ bios_enabled = 1;
+ env->update_vapic = 1;
+ return 1;
+}
+
+static void patch_call(CPUState *env, uint64_t rip, uint32_t target)
+{
+ uint32_t offset;
+
+ offset = target - vapic_bios.virt_base + bios_addr - rip - 5;
+ write_byte_virt(env, rip, 0xe8); /* call near */
+ write_byte_virt(env, rip + 1, offset);
+ write_byte_virt(env, rip + 2, offset >> 8);
+ write_byte_virt(env, rip + 3, offset >> 16);
+ write_byte_virt(env, rip + 4, offset >> 24);
+}
+
+static void patch_instruction(CPUState *env, uint64_t rip)
+{
+ uint8_t b1, b2;
+ struct vapic_patches *vp;
+
+ vp = smp_cpus == 1 ? &vapic_bios.up : &vapic_bios.mp;
+ b1 = read_byte_virt(env, rip);
+ b2 = read_byte_virt(env, rip + 1);
+ switch (b1) {
+ case 0x89: /* mov r32 to r/m32 */
+ write_byte_virt(env, rip, 0x50 + modrm_reg(b2)); /* push reg */
+ patch_call(env, rip + 1, vp->set_tpr);
+ break;
+ case 0x8b: /* mov r/m32 to r32 */
+ write_byte_virt(env, rip, 0x90);
+ patch_call(env, rip + 1, vp->get_tpr[modrm_reg(b2)]);
+ break;
+ case 0xa1: /* mov abs to eax */
+ patch_call(env, rip, vp->get_tpr[0]);
+ break;
+ case 0xa3: /* mov eax to abs */
+ patch_call(env, rip, vp->set_tpr_eax);
+ break;
+ case 0xc7: /* mov imm32, r/m32 (c7/0) */
+ write_byte_virt(env, rip, 0x68); /* push imm32 */
+ write_byte_virt(env, rip + 1, read_byte_virt(env, rip+6));
+ write_byte_virt(env, rip + 2, read_byte_virt(env, rip+7));
+ write_byte_virt(env, rip + 3, read_byte_virt(env, rip+8));
+ write_byte_virt(env, rip + 4, read_byte_virt(env, rip+9));
+ patch_call(env, rip + 5, vp->set_tpr);
+ break;
+ case 0xff: /* push r/m32 */
+ printf("patching push\n");
+ write_byte_virt(env, rip, 0x50); /* push eax */
+ patch_call(env, rip + 1, vp->get_tpr_stack);
+ break;
+ default:
+ printf("funny insn %02x %02x\n", b1, b2);
+ }
+}
+
+void kvm_tpr_access_report(CPUState *env, uint64_t rip, int is_write)
+{
+ if (!instruction_is_ok(env, rip, is_write))
+ return;
+ if (!bios_is_mapped(env, rip))
+ return;
+ if (!kvm_tpr_enable_vapic(env))
+ return;
+ patch_instruction(env, rip);
+}
+
+void kvm_tpr_vcpu_start(CPUState *env)
+{
+ kvm_enable_tpr_access_reporting(env);
+ if (bios_enabled)
+ kvm_tpr_enable_vapic(env);
+}
+
+static void tpr_save(QEMUFile *f, void *s)
+{
+ int i;
+
+ for (i = 0; i < (sizeof vapic_bios) / 4; ++i)
+ qemu_put_be32s(f, &((uint32_t *)&vapic_bios)[i]);
+ qemu_put_be32s(f, &bios_enabled);
+ qemu_put_be32s(f, &real_tpr);
+ qemu_put_be32s(f, &bios_addr);
+ qemu_put_be32s(f, &vapic_phys);
+ qemu_put_be32s(f, &vbios_desc_phys);
+}
+
+static int tpr_load(QEMUFile *f, void *s, int version_id)
+{
+ int i;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ for (i = 0; i < (sizeof vapic_bios) / 4; ++i)
+ qemu_get_be32s(f, &((uint32_t *)&vapic_bios)[i]);
+ qemu_get_be32s(f, &bios_enabled);
+ qemu_get_be32s(f, &real_tpr);
+ qemu_get_be32s(f, &bios_addr);
+ qemu_get_be32s(f, &vapic_phys);
+ qemu_get_be32s(f, &vbios_desc_phys);
+
+ if (bios_enabled) {
+ CPUState *env;
+
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ enable_vapic(env);
+ }
+
+ return 0;
+}
+
+static void vtpr_ioport_write16(void *opaque, uint32_t addr, uint32_t val)
+{
+ struct kvm_regs regs;
+ CPUState *env = cpu_single_env;
+ struct kvm_sregs sregs;
+ kvm_get_regs(env, &regs);
+ kvm_get_sregs(env, &sregs);
+ vapic_bios_addr = ((sregs.cs.base + regs.rip) & ~(512 - 1)) + val;
+ bios_enabled = 0;
+}
+
+static void vtpr_ioport_write(void *opaque, uint32_t addr, uint32_t val)
+{
+ CPUState *env = cpu_single_env;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ uint32_t rip;
+
+ kvm_get_regs(env, &regs);
+ rip = regs.rip - 2;
+ write_byte_virt(env, rip, 0x66);
+ write_byte_virt(env, rip + 1, 0x90);
+ if (bios_enabled)
+ return;
+ if (!bios_is_mapped(env, rip))
+ printf("bios not mapped?\n");
+ kvm_get_sregs(env, &sregs);
+ for (addr = 0xfffff000u; addr >= 0x80000000u; addr -= 4096)
+ if (map_addr(&sregs, addr, NULL) == 0xfee00000u) {
+ real_tpr = addr + 0x80;
+ break;
+ }
+ bios_enabled = 1;
+ update_vbios_real_tpr();
+ kvm_tpr_enable_vapic(env);
+}
+
+void kvm_tpr_opt_setup(void)
+{
+ register_savevm("kvm-tpr-opt", 0, 1, tpr_save, tpr_load, NULL);
+ register_ioport_write(0x7e, 1, 1, vtpr_ioport_write, NULL);
+ register_ioport_write(0x7e, 2, 2, vtpr_ioport_write16, NULL);
+}
+
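kvm-tpr-opt.c avoids a VM exit for every TPR access on guests that touch the task priority register through the memory-mapped APIC page: when KVM reports a TPR access, instruction_is_ok() decodes the faulting mov/push whose absolute operand ends in offset 0x080 (the TPR within the APIC page), bios_is_mapped() locates the relocated vapic option ROM in the guest, and patch_instruction() rewrites the instruction into a call into that ROM. patch_call() emits the e8 rel32 form, with the displacement measured from the end of the call and the handler relocated by (bios_addr - virt_base). A small self-contained check of that displacement arithmetic; all concrete addresses below are made up for illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t virt_base = 0x000e0000;   /* vapic_bios.virt_base           */
        uint32_t bios_addr = 0x800e0000;   /* where the guest mapped the ROM */
        uint32_t target    = 0x000e0123;   /* handler address as linked      */
        uint32_t rip       = 0x80102340;   /* address of the patched insn    */

        /* Same formula as patch_call(): relocated target minus (rip + 5). */
        uint32_t offset = target - virt_base + bios_addr - rip - 5;

        /* The CPU resolves "e8 rel32" as rip + 5 + rel32.                  */
        assert((uint32_t)(rip + 5 + offset) == target - virt_base + bios_addr);
        return 0;
    }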
diff --git a/kvm.h b/kvm.h
index 1c93ac575..9fa4e2555 100644
--- a/kvm.h
+++ b/kvm.h
@@ -16,6 +16,9 @@
#include "config.h"
#include "qemu-queue.h"
+#include "qemu-kvm.h"
+
+#ifdef KVM_UPSTREAM
#ifdef CONFIG_KVM
extern int kvm_allowed;
@@ -47,7 +50,12 @@ int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
int kvm_set_migration_log(int enable);
int kvm_has_sync_mmu(void);
+#endif /* KVM_UPSTREAM */
int kvm_has_vcpu_events(void);
+int kvm_put_vcpu_events(CPUState *env);
+int kvm_get_vcpu_events(CPUState *env);
+
+#ifdef KVM_UPSTREAM
void kvm_setup_guest_memory(void *start, size_t size);
@@ -91,7 +99,9 @@ int kvm_arch_init(KVMState *s, int smp_cpus);
int kvm_arch_init_vcpu(CPUState *env);
+#endif
void kvm_arch_reset_vcpu(CPUState *env);
+#ifdef KVM_UPSTREAM
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
@@ -140,3 +150,5 @@ static inline void cpu_synchronize_state(CPUState *env)
}
#endif
+
+#endif
diff --git a/kvm/.gitignore b/kvm/.gitignore
new file mode 100644
index 000000000..22a820011
--- /dev/null
+++ b/kvm/.gitignore
@@ -0,0 +1,66 @@
+*.o
+*.d
+*~
+*.flat
+*.a
+config.mak
+.*.cmd
+qemu/config-host.h
+qemu/config-host.mak
+user/test/bootstrap
+user/kvmctl
+qemu/dyngen
+qemu/x86_64-softmmu
+qemu/qemu-img
+qemu/qemu-nbd
+*.ko
+*.mod.c
+bios/*.bin
+bios/*.sym
+bios/*.txt
+bios/acpi-dsdt.aml
+vgabios/*.bin
+vgabios/*.txt
+extboot/extboot.bin
+extboot/extboot.img
+extboot/signrom
+kernel/config.kbuild
+kernel/modules.order
+kernel/Module.symvers
+kernel/Modules.symvers
+kernel/Module.markers
+kernel/.tmp_versions
+kernel/include-compat/asm
+kernel/include-compat/asm-x86/asm-x86
+kernel/include
+kernel/x86/modules.order
+kernel/x86/i825[49].[ch]
+kernel/x86/kvm_main.c
+kernel/x86/kvm_svm.h
+kernel/x86/vmx.[ch]
+kernel/x86/svm.[ch]
+kernel/x86/mmu.[ch]
+kernel/x86/paging_tmpl.h
+kernel/x86/x86_emulate.[ch]
+kernel/x86/ioapic.[ch]
+kernel/x86/iodev.h
+kernel/x86/irq.[ch]
+kernel/x86/kvm_trace.c
+kernel/x86/lapic.[ch]
+kernel/x86/tss.h
+kernel/x86/x86.[ch]
+kernel/x86/coalesced_mmio.[ch]
+kernel/x86/kvm_cache_regs.h
+kernel/x86/vtd.c
+kernel/x86/irq_comm.c
+kernel/x86/timer.c
+kernel/x86/kvm_timer.h
+kernel/x86/iommu.c
+qemu/pc-bios/extboot.bin
+qemu/qemu-doc.html
+qemu/*.[18]
+qemu/*.pod
+qemu/qemu-tech.html
+qemu/qemu-options.texi
+user/kvmtrace
+user/test/x86/bootstrap
diff --git a/kvm/Makefile b/kvm/Makefile
new file mode 100644
index 000000000..617504caf
--- /dev/null
+++ b/kvm/Makefile
@@ -0,0 +1,125 @@
+
+include config.mak
+
+DESTDIR=
+
+rpmrelease = devel
+
+sane-arch = $(subst i386,x86,$(subst x86_64,x86,$(subst s390x,s390,$(ARCH))))
+
+.PHONY: kernel user libkvm qemu bios vgabios extboot clean libfdt cscope
+
+all: libkvm qemu
+ifneq '$(filter $(ARCH), x86_64 i386 ia64)' ''
+ all: $(if $(WANT_MODULE), kernel) user
+endif
+
+kcmd = $(if $(WANT_MODULE),,@\#)
+
+qemu kernel user libkvm:
+ $(MAKE) -C $@
+
+qemu: libkvm
+ifneq '$(filter $(ARCH), i386 x86_64)' ''
+ qemu: extboot
+endif
+ifneq '$(filter $(ARCH), powerpc ia64)' ''
+ qemu: libfdt
+endif
+user: libkvm
+
+# sync if kernel/Makefile exists and if using --with-patched-kernel
+user libkvm qemu: header-sync-$(if $(wildcard kernel/Makefile),$(if $(WANT_MODULE),n,y),n)
+
+header-sync-n:
+
+header-sync-y:
+ make -C kernel \
+ LINUX=$(if $(KERNELSOURCEDIR),$(KERNELSOURCEDIR),$(KERNELDIR)) \
+ header-sync
+ rm -f kernel/include/asm
+ ln -sf asm-$(sane-arch) kernel/include/asm
+
+bios:
+ $(MAKE) -C $@
+ cp bios/BIOS-bochs-latest qemu/pc-bios/bios.bin
+
+vgabios:
+ $(MAKE) -C $@
+ cp vgabios/VGABIOS-lgpl-latest.bin qemu/pc-bios/vgabios.bin
+ cp vgabios/VGABIOS-lgpl-latest.cirrus.bin qemu/pc-bios/vgabios-cirrus.bin
+
+extboot:
+ $(MAKE) -C $@
+ if ! [ -f qemu/pc-bios/extboot.bin ] \
+ || ! cmp -s qemu/pc-bios/extboot.bin extboot/extboot.bin; then \
+ cp extboot/extboot.bin qemu/pc-bios/extboot.bin; \
+ fi
+libfdt:
+ $(MAKE) -C $@
+
+LINUX=linux-2.6
+
+sync:
+ make -C kernel sync LINUX=$(shell readlink -f "$(LINUX)")
+
+bindir = /usr/bin
+bin = $(bindir)/kvm
+initdir = /etc/init.d
+confdir = /etc/kvm
+utilsdir = /etc/kvm/utils
+
+install-rpm:
+ mkdir -p $(DESTDIR)/$(bindir)
+ mkdir -p $(DESTDIR)/$(confdir)
+ mkdir -p $(DESTDIR)/$(initdir)
+ mkdir -p $(DESTDIR)/$(utilsdir)
+ mkdir -p $(DESTDIR)/etc/udev/rules.d
+ make -C qemu DESTDIR=$(DESTDIR)/ install
+ ln -sf /usr/kvm/bin/qemu-system-x86_64 $(DESTDIR)/$(bin)
+ install -m 755 kvm_stat $(DESTDIR)/$(bindir)/kvm_stat
+ cp scripts/kvm $(DESTDIR)/$(initdir)/kvm
+ cp scripts/qemu-ifup $(DESTDIR)/$(confdir)/qemu-ifup
+ install -t $(DESTDIR)/etc/udev/rules.d scripts/*kvm*.rules
+
+install:
+ $(kcmd)make -C kernel DESTDIR="$(DESTDIR)" install
+ make -C libkvm DESTDIR="$(DESTDIR)" install
+ make -C qemu DESTDIR="$(DESTDIR)" install
+
+tmpspec = .tmp.kvm.spec
+RPMTOPDIR = $$(pwd)/rpmtop
+
+rpm: srpm
+ rm -rf $(RPMTOPDIR)/BUILD
+ mkdir -p $(RPMTOPDIR)/{BUILD,RPMS/$$(uname -i)}
+ rpmbuild --rebuild \
+ --define="_topdir $(RPMTOPDIR)" \
+ $(RPMTOPDIR)/SRPMS/kvm-0.0-$(rpmrelease).src.rpm
+
+srpm:
+ mkdir -p $(RPMTOPDIR)/{SOURCES,SRPMS}
+ sed 's/^Release:.*/Release: $(rpmrelease)/' kvm.spec > $(tmpspec)
+ tar czf $(RPMTOPDIR)/SOURCES/kvm.tar.gz qemu
+ tar czf $(RPMTOPDIR)/SOURCES/user.tar.gz user
+ tar czf $(RPMTOPDIR)/SOURCES/libkvm.tar.gz libkvm
+ tar czf $(RPMTOPDIR)/SOURCES/kernel.tar.gz kernel
+ tar czf $(RPMTOPDIR)/SOURCES/scripts.tar.gz scripts
+ tar czf $(RPMTOPDIR)/SOURCES/extboot.tar.gz extboot
+ cp Makefile configure kvm_stat $(RPMTOPDIR)/SOURCES
+ rpmbuild --define="_topdir $(RPMTOPDIR)" -bs $(tmpspec)
+ $(RM) $(tmpspec)
+
+clean:
+ for i in $(if $(WANT_MODULE), kernel) user libkvm qemu libfdt; do \
+ make -C $$i clean; \
+ done
+ rm -f ./cscope.*
+
+distclean: clean
+ rm -f config.mak user/config.mak
+
+cscope:
+ rm -f ./cscope.*
+ find . -wholename './kernel' -prune -o -name "*.[ch]" -print > ./cscope.files
+ cscope -b
diff --git a/kvm/configure b/kvm/configure
new file mode 100755
index 000000000..249c743f6
--- /dev/null
+++ b/kvm/configure
@@ -0,0 +1,159 @@
+#!/bin/bash
+
+prefix=/usr/local
+kerneldir=/lib/modules/$(uname -r)/build
+cc=gcc
+ld=ld
+objcopy=objcopy
+ar=ar
+want_module=1
+qemu_cflags=
+qemu_ldflags=
+kvm_trace=
+qemu_opts=()
+cross_prefix=
+arch=`uname -m`
+target_exec=
+# don't use uname if kerneldir is set
+no_uname=
+if [ -z "$TMPDIR" ] ; then
+ TMPDIR=.
+fi
+
+if [ ! -e kernel/Makefile ]; then
+ want_module=
+fi
+
+usage() {
+ cat <<-EOF
+ Usage: $0 [options]
+
+ Options include:
+ --arch=ARCH architecture to compile for ($arch)
+ --cross-prefix=PREFIX prefix for cross compile
+ --prefix=PREFIX where to install things ($prefix)
+ --with-patched-kernel don't use external module
+ --with-kvm-trace Enable kvm_trace
+ --kerneldir=DIR kernel build directory ($kerneldir)
+ --qemu-cflags=CFLAGS CFLAGS to add to qemu configuration
+ --qemu-ldflags=LDFLAGS LDFLAGS to add to qemu configuration
+
+ Any additional option is given to qemu's configure verbatim; including:
+
+EOF
+ cd qemu
+ ./configure --help | egrep "enable-|disable-" \
+ | grep -v user | grep -v system | grep -v kqemu | grep -v kvm \
+ | sed -e "s/^ / /g" \
+ | sed -e"s/ enable/enable/g" | sed -e "s/ disable/disable/g"
+ exit 1
+}
+
+while [[ "$1" = -* ]]; do
+ opt="$1"; shift
+ arg=
+ hasarg=
+ if [[ "$opt" = *=* ]]; then
+ arg="${opt#*=}"
+ opt="${opt%%=*}"
+ hasarg=1
+ fi
+ case "$opt" in
+ --prefix)
+ prefix="$arg"
+ ;;
+ --kerneldir)
+ kerneldir="$arg"
+ no_uname=1
+ ;;
+ --with-patched-kernel)
+ want_module=
+ ;;
+ --with-kvm-trace)
+ kvm_trace=y
+ ;;
+ --qemu-cflags)
+ qemu_cflags="$arg"
+ ;;
+ --qemu-ldflags)
+ qemu_ldflags="$arg"
+ ;;
+ --arch)
+ arch="$arg"
+ ;;
+ --cross-prefix)
+ cross_prefix="$arg"
+ ;;
+ --help)
+ usage
+ ;;
+ *)
+ qemu_opts=("${qemu_opts[@]}" "$opt${hasarg:+=$arg}")
+ ;;
+ esac
+done
+
+
+#set kernel directory
+libkvm_kerneldir=$(readlink -f kernel)
+
+case $arch in
+ i?86*|x86_64*)
+ arch=${arch/#i?86/i386}
+ target_exec="x86_64-softmmu"
+ qemu_cflags="$qemu_cflags -DCONFIG_X86"
+ ;;
+ ia64*)
+ target_exec="ia64-softmmu"
+ ;;
+ powerpc*)
+ target_exec="ppcemb-softmmu"
+ qemu_cflags="$qemu_cflags -I $PWD/libfdt"
+ qemu_ldflags="$qemu_ldflags -L $PWD/libfdt"
+ ;;
+esac
+
+processor=${arch#*-}
+arch=${arch%%-*}
+
+#configure kernel module
+[ -e kernel/Makefile ] && (cd kernel;
+ ./configure \
+ --kerneldir="$kerneldir" \
+ --arch="$arch" \
+ $([ -z ${want_module} ] && echo "--with-patched-kernel") \
+ ${cross_prefix:+"--cross-prefix=$cross_prefix"} \
+ ${kvm_trace:+"--with-kvm-trace"}
+)
+
+#configure user dir
+(cd user; ./configure --prefix="$prefix" --kerneldir="$libkvm_kerneldir" \
+ --arch="$arch" --processor="$processor" \
+ ${cross_prefix:+"--cross-prefix=$cross_prefix"})
+
+#configure qemu
+(cd qemu; ./configure --target-list=$target_exec \
+ --disable-kqemu \
+ --extra-cflags="-I $PWD/../libkvm $qemu_cflags" \
+ --extra-ldflags="-L $PWD/../libkvm $qemu_ldflags" \
+ --kerneldir="$libkvm_kerneldir" \
+ --prefix="$prefix" \
+ ${cross_prefix:+"--cross-prefix=$cross_prefix"} \
+ ${cross_prefix:+"--cpu=$arch"} "${qemu_opts[@]}"
+) || usage
+
+
+cat <<EOF > config.mak
+ARCH=$arch
+PROCESSOR=$processor
+PREFIX=$prefix
+KERNELDIR=$kerneldir
+KERNELSOURCEDIR=$kernelsourcedir
+LIBKVM_KERNELDIR=$libkvm_kerneldir
+WANT_MODULE=$want_module
+CROSS_COMPILE=$cross_prefix
+CC=$cross_prefix$cc
+LD=$cross_prefix$ld
+OBJCOPY=$cross_prefix$objcopy
+AR=$cross_prefix$ar
+EOF
diff --git a/kvm/doxygen.conf b/kvm/doxygen.conf
new file mode 100644
index 000000000..21a04c0a9
--- /dev/null
+++ b/kvm/doxygen.conf
@@ -0,0 +1,1252 @@
+# Doxyfile 1.5.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = KVM
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = Release 7
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = docs
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Finnish, French, German, Greek, Hungarian,
+# Italian, Japanese, Japanese-en (Japanese with English messages), Korean,
+# Korean-en, Lithuanian, Norwegian, Polish, Portuguese, Romanian, Russian,
+# Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output.
+# The encoding is not always determined by the language that is chosen,
+# but also whether or not the output is meant for Windows or non-Windows users.
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES
+# forces the Windows encoding (this is the default for the Windows binary),
+# whereas setting the tag to NO uses a Unix-style encoding (the default for
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description.
+
+JAVADOC_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = YES
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for Java.
+# For instance, namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to
+# include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = NO
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = NO
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = user/ kernel/
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = NO
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
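+
+# Example (illustrative only, commented out): group index entries while
+# ignoring the common "kvm_" prefix used throughout these sources.
+# IGNORE_PREFIX = kvm_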
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate an index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
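+
+# Example (illustrative only, commented out; the macro names are placeholders):
+# define __user away and fix the architecture so that conditionally compiled
+# blocks are documented consistently.
+# PREDEFINED = __user= CONFIG_X86=1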
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
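+
+# Example (illustrative only, commented out; "qemu.tag" and the path are
+# placeholders for an external documentation set):
+# TAGFILES = qemu.tag=../qemu/html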
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a caller dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/kvm/extboot/Makefile b/kvm/extboot/Makefile
new file mode 100644
index 000000000..ab2dae70d
--- /dev/null
+++ b/kvm/extboot/Makefile
@@ -0,0 +1,41 @@
+OBJCOPY=objcopy
+
+# from kernel sources - scripts/Kbuild.include
+# try-run
+# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
+# Exit code chooses option. "$$TMP" can be used as a temporary file and
+# is automatically cleaned up.
+try-run = $(shell set -e; \
+ TMP="$(TMPOUT).$$$$.tmp"; \
+ if ($(1)) >/dev/null 2>&1; \
+ then echo "$(2)"; \
+ else echo "$(3)"; \
+ fi; \
+ rm -f "$$TMP")
+
+# cc-option-yn
+# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
+cc-option-yn = $(call try-run,\
+ $(CC) $(KBUILD_CFLAGS) $(1) -S -xc /dev/null -o "$$TMP",y,n)
+
+CFLAGS = -Wall -Wstrict-prototypes -Werror -fomit-frame-pointer -fno-builtin
+ifeq ($(call cc-option-yn,-fno-stack-protector),y)
+CFLAGS += -fno-stack-protector
+endif
+
+all: extboot.bin
+
+%.o: %.S
+ $(CC) $(CFLAGS) -o $@ -c $<
+
+extboot.img: extboot.o
+ $(LD) --oformat binary -Ttext 0 -o $@ $<
+
+extboot.bin: extboot.img signrom
+ ./signrom extboot.img extboot.bin
+
+signrom: signrom.c
+ $(CC) -o $@ -g -Wall $^
+
+clean:
+ $(RM) *.o *.img *.bin signrom *~
diff --git a/kvm/extboot/STATUS b/kvm/extboot/STATUS
new file mode 100644
index 000000000..687c6d64c
--- /dev/null
+++ b/kvm/extboot/STATUS
@@ -0,0 +1,6 @@
+Working
+-------
+
+Ubuntu Server 7.04 (i386)
+Windows 2000 Professional (i386)
+Windows XP SP2 (i386)
diff --git a/kvm/extboot/signrom.c b/kvm/extboot/signrom.c
new file mode 100644
index 000000000..fe8d67745
--- /dev/null
+++ b/kvm/extboot/signrom.c
@@ -0,0 +1,79 @@
+/*
+ * Extended Boot Option ROM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ * Authors: Anthony Liguori <aliguori@us.ibm.com>
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+ FILE *fin, *fout;
+ char buffer[512], oldbuffer[512];
+ int i, size, lag = 0;
+ uint8_t sum = 0;
+
+ if (argc != 3) {
+ printf("Usage: %s ROM OUTPUT\n", argv[0]);
+ return 1;
+ }
+
+ fin = fopen(argv[1], "rb");
+ fout = fopen(argv[2], "wb");
+
+ if (fin == NULL || fout == NULL) {
+ fprintf(stderr, "Could not open input/output files\n");
+ return 1;
+ }
+
+ do {
+ size = fread(buffer, 512, 1, fin);
+ if (size == 1) {
+ for (i = 0; i < 512; i++)
+ sum += buffer[i];
+
+ if (lag) {
+ if (fwrite(oldbuffer, 512, 1, fout) != 1) {
+ fprintf(stderr, "Write failed\n");
+ return 1;
+ }
+ }
+ lag = 1;
+ memcpy(oldbuffer, buffer, 512);
+ }
+ } while (size == 1);
+
+ if (size != 0) {
+ fprintf(stderr, "Failed to read from input file\n");
+ return 1;
+ }
+
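+ /*
+  * Option ROMs must checksum to zero: patch the last byte of the final
+  * 512-byte block with the negated running sum so that all bytes add up
+  * to 0 mod 256 (this assumes the padded input leaves that byte zero).
+  */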
+ oldbuffer[511] = -sum;
+
+ if (fwrite(oldbuffer, 512, 1, fout) != 1) {
+ fprintf(stderr, "Failed to write to output file\n");
+ return 1;
+ }
+
+ fclose(fin);
+ fclose(fout);
+
+ return 0;
+}
diff --git a/kvm/include/ia64/asm/kvm.h b/kvm/include/ia64/asm/kvm.h
new file mode 100644
index 000000000..bc90c75ad
--- /dev/null
+++ b/kvm/include/ia64/asm/kvm.h
@@ -0,0 +1,264 @@
+#ifndef __ASM_IA64_KVM_H
+#define __ASM_IA64_KVM_H
+
+/*
+ * kvm structure definitions for ia64
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Select x86 specific features in <linux/kvm.h> */
+#define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_DEVICE_ASSIGNMENT
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+#define KVM_IOAPIC_NUM_PINS 48
+
+struct kvm_ioapic_state {
+ __u64 base_address;
+ __u32 ioregsel;
+ __u32 id;
+ __u32 irr;
+ __u32 pad;
+ union {
+ __u64 bits;
+ struct {
+ __u8 vector;
+ __u8 delivery_mode:3;
+ __u8 dest_mode:1;
+ __u8 delivery_status:1;
+ __u8 polarity:1;
+ __u8 remote_irr:1;
+ __u8 trig_mode:1;
+ __u8 mask:1;
+ __u8 reserve:7;
+ __u8 reserved[4];
+ __u8 dest_id;
+ } fields;
+ } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER 0
+#define KVM_IRQCHIP_PIC_SLAVE 1
+#define KVM_IRQCHIP_IOAPIC 2
+#define KVM_NR_IRQCHIPS 3
+
+#define KVM_CONTEXT_SIZE 8*1024
+
+struct kvm_fpreg {
+ union {
+ unsigned long bits[2];
+ long double __dummy; /* force 16-byte alignment */
+ } u;
+};
+
+union context {
+ /* 8K size */
+ char dummy[KVM_CONTEXT_SIZE];
+ struct {
+ unsigned long psr;
+ unsigned long pr;
+ unsigned long caller_unat;
+ unsigned long pad;
+ unsigned long gr[32];
+ unsigned long ar[128];
+ unsigned long br[8];
+ unsigned long cr[128];
+ unsigned long rr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long pkr[8];
+ struct kvm_fpreg fr[128];
+ };
+};
+
+struct thash_data {
+ union {
+ struct {
+ unsigned long p : 1; /* 0 */
+ unsigned long rv1 : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long rv2 : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ unsigned long ig1 : 11; /* 53-63 */
+ };
+ struct {
+ unsigned long __rv1 : 53; /* 0-52 */
+ unsigned long contiguous : 1; /*53 */
+ unsigned long tc : 1; /* 54 TR or TC */
+ unsigned long cl : 1;
+ /* 55 I side or D side cache line */
+ unsigned long len : 4; /* 56-59 */
+ unsigned long io : 1; /* 60 entry is for io or not */
+ unsigned long nomap : 1;
+ /* 61 entry can't be inserted into machine TLB. */
+ unsigned long checked : 1;
+ /* 62 for VTLB/VHPT sanity check */
+ unsigned long invalid : 1;
+ /* 63 invalid entry */
+ };
+ unsigned long page_flags;
+ }; /* same for VHPT and TLB */
+
+ union {
+ struct {
+ unsigned long rv3 : 2;
+ unsigned long ps : 6;
+ unsigned long key : 24;
+ unsigned long rv4 : 32;
+ };
+ unsigned long itir;
+ };
+ union {
+ struct {
+ unsigned long ig2 : 12;
+ unsigned long vpn : 49;
+ unsigned long vrn : 3;
+ };
+ unsigned long ifa;
+ unsigned long vadr;
+ struct {
+ unsigned long tag : 63;
+ unsigned long ti : 1;
+ };
+ unsigned long etag;
+ };
+ union {
+ struct thash_data *next;
+ unsigned long rid;
+ unsigned long gpaddr;
+ };
+};
+
+#define NITRS 8
+#define NDTRS 8
+
+struct saved_vpd {
+ unsigned long vhpi;
+ unsigned long vgr[16];
+ unsigned long vbgr[16];
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long vpsr;
+ unsigned long vpr;
+ union {
+ unsigned long vcr[128];
+ struct {
+ unsigned long dcr;
+ unsigned long itm;
+ unsigned long iva;
+ unsigned long rsv1[5];
+ unsigned long pta;
+ unsigned long rsv2[7];
+ unsigned long ipsr;
+ unsigned long isr;
+ unsigned long rsv3;
+ unsigned long iip;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long ifs;
+ unsigned long iim;
+ unsigned long iha;
+ unsigned long rsv4[38];
+ unsigned long lid;
+ unsigned long ivr;
+ unsigned long tpr;
+ unsigned long eoi;
+ unsigned long irr[4];
+ unsigned long itv;
+ unsigned long pmv;
+ unsigned long cmcv;
+ unsigned long rsv5[5];
+ unsigned long lrr0;
+ unsigned long lrr1;
+ unsigned long rsv6[46];
+ };
+ };
+};
+
+struct kvm_regs {
+ struct saved_vpd vpd;
+ /*Arch-regs*/
+ int mp_state;
+ unsigned long vmm_rr;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+
+ char irq_check;
+ unsigned long saved_itc;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+ unsigned long last_itc;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+ unsigned long fp_psr; /* used for lazy float register */
+ unsigned long saved_gp;
+ /* for physical emulation */
+
+ union context saved_guest;
+
+ unsigned long reserved[64]; /* for future use */
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+#define KVM_IA64_VCPU_STACK_SHIFT 16
+#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+ unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+#endif
diff --git a/kvm/include/ia64/asm/kvm_para.h b/kvm/include/ia64/asm/kvm_para.h
new file mode 100644
index 000000000..1588aee78
--- /dev/null
+++ b/kvm/include/ia64/asm/kvm_para.h
@@ -0,0 +1,31 @@
+#ifndef __IA64_KVM_PARA_H
+#define __IA64_KVM_PARA_H
+
+/*
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifdef __KERNEL__
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/kvm/include/linux/compiler.h b/kvm/include/linux/compiler.h
new file mode 100644
index 000000000..f70c49f76
--- /dev/null
+++ b/kvm/include/linux/compiler.h
@@ -0,0 +1,2 @@
+/* dummy file */
+
diff --git a/kvm/include/linux/config.h b/kvm/include/linux/config.h
new file mode 100644
index 000000000..a421f31cf
--- /dev/null
+++ b/kvm/include/linux/config.h
@@ -0,0 +1,43 @@
+#ifndef KVM_UNIFDEF_H
+#define KVM_UNIFDEF_H
+
+#ifdef __i386__
+#ifndef CONFIG_X86_32
+#define CONFIG_X86_32 1
+#endif
+#endif
+
+#ifdef __x86_64__
+#ifndef CONFIG_X86_64
+#define CONFIG_X86_64 1
+#endif
+#endif
+
+#if defined(__i386__) || defined (__x86_64__)
+#ifndef CONFIG_X86
+#define CONFIG_X86 1
+#endif
+#endif
+
+#ifdef __ia64__
+#ifndef CONFIG_IA64
+#define CONFIG_IA64 1
+#endif
+#endif
+
+#ifdef __PPC__
+#ifndef CONFIG_PPC
+#define CONFIG_PPC 1
+#endif
+#endif
+
+#ifdef __s390__
+#ifndef CONFIG_S390
+#define CONFIG_S390 1
+#endif
+#endif
+
+#endif
+
+
+#define __user
diff --git a/kvm/include/linux/kvm.h b/kvm/include/linux/kvm.h
new file mode 100644
index 000000000..3fd3371a1
--- /dev/null
+++ b/kvm/include/linux/kvm.h
@@ -0,0 +1,740 @@
+#ifndef __LINUX_KVM_H
+#define __LINUX_KVM_H
+
+/*
+ * Userspace interface for /dev/kvm - kernel based virtual machine
+ *
+ * Note: you must update KVM_API_VERSION if you change this interface.
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/ioctl.h>
+#include <asm/kvm.h>
+
+#define KVM_API_VERSION 12
+
+/* *** Deprecated interfaces *** */
+
+#define KVM_TRC_SHIFT 16
+
+#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
+#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
+
+#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
+#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
+#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
+
+#define KVM_TRC_HEAD_SIZE 12
+#define KVM_TRC_CYCLE_SIZE 8
+#define KVM_TRC_EXTRA_MAX 7
+
+#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
+#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
+#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
+#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
+#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
+
+struct kvm_user_trace_setup {
+ __u32 buf_size;
+ __u32 buf_nr;
+};
+
+#define __KVM_DEPRECATED_MAIN_W_0x06 \
+ _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
+#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
+#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
+
+#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
+
+struct kvm_breakpoint {
+ __u32 enabled;
+ __u32 padding;
+ __u64 address;
+};
+
+struct kvm_debug_guest {
+ __u32 enabled;
+ __u32 pad;
+ struct kvm_breakpoint breakpoints[4];
+ __u32 singlestep;
+};
+
+#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
+
+/* *** End of deprecated interfaces *** */
+
+
+/* for KVM_CREATE_MEMORY_REGION */
+struct kvm_memory_region {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+};
+
+/* for KVM_SET_USER_MEMORY_REGION */
+struct kvm_userspace_memory_region {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* start of the userspace allocated memory */
+};
+
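+/*
+ * Illustrative sketch (not part of this header): register guest RAM backed
+ * by a host mmap() as memory slot 0 on a VM fd (ram_ptr, ram_size and
+ * vm_fd are placeholders).
+ *
+ *   struct kvm_userspace_memory_region mem = {
+ *           .slot = 0,
+ *           .guest_phys_addr = 0,
+ *           .memory_size = ram_size,
+ *           .userspace_addr = (__u64)(unsigned long)ram_ptr,
+ *   };
+ *   ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
+ */
+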
+/* for kvm_memory_region::flags */
+#define KVM_MEM_LOG_DIRTY_PAGES 1UL
+
+
+/* for KVM_IRQ_LINE */
+struct kvm_irq_level {
+ /*
+ * ACPI gsi notion of irq.
+ * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
+ * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
+ */
+ union {
+ __u32 irq;
+ __s32 status;
+ };
+ __u32 level;
+};
+
+
+struct kvm_irqchip {
+ __u32 chip_id;
+ __u32 pad;
+ union {
+ char dummy[512]; /* reserving space */
+#ifdef __KVM_HAVE_PIT
+ struct kvm_pic_state pic;
+#endif
+#ifdef __KVM_HAVE_IOAPIC
+ struct kvm_ioapic_state ioapic;
+#endif
+ } chip;
+};
+
+/* for KVM_CREATE_PIT2 */
+struct kvm_pit_config {
+ __u32 flags;
+ __u32 pad[15];
+};
+
+#define KVM_PIT_SPEAKER_DUMMY 1
+
+#define KVM_EXIT_UNKNOWN 0
+#define KVM_EXIT_EXCEPTION 1
+#define KVM_EXIT_IO 2
+#define KVM_EXIT_HYPERCALL 3
+#define KVM_EXIT_DEBUG 4
+#define KVM_EXIT_HLT 5
+#define KVM_EXIT_MMIO 6
+#define KVM_EXIT_IRQ_WINDOW_OPEN 7
+#define KVM_EXIT_SHUTDOWN 8
+#define KVM_EXIT_FAIL_ENTRY 9
+#define KVM_EXIT_INTR 10
+#define KVM_EXIT_SET_TPR 11
+#define KVM_EXIT_TPR_ACCESS 12
+#define KVM_EXIT_S390_SIEIC 13
+#define KVM_EXIT_S390_RESET 14
+#define KVM_EXIT_DCR 15
+#define KVM_EXIT_NMI 16
+#define KVM_EXIT_INTERNAL_ERROR 17
+
+/* For KVM_EXIT_INTERNAL_ERROR */
+#define KVM_INTERNAL_ERROR_EMULATION 1
+#define KVM_INTERNAL_ERROR_SIMUL_EX 2
+
+/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
+struct kvm_run {
+ /* in */
+ __u8 request_interrupt_window;
+ __u8 padding1[7];
+
+ /* out */
+ __u32 exit_reason;
+ __u8 ready_for_interrupt_injection;
+ __u8 if_flag;
+ __u8 padding2[2];
+
+ /* in (pre_kvm_run), out (post_kvm_run) */
+ __u64 cr8;
+ __u64 apic_base;
+
+#ifdef __KVM_S390
+ /* the processor status word for s390 */
+ __u64 psw_mask; /* psw upper half */
+ __u64 psw_addr; /* psw lower half */
+#endif
+ union {
+ /* KVM_EXIT_UNKNOWN */
+ struct {
+ __u64 hardware_exit_reason;
+ } hw;
+ /* KVM_EXIT_FAIL_ENTRY */
+ struct {
+ __u64 hardware_entry_failure_reason;
+ } fail_entry;
+ /* KVM_EXIT_EXCEPTION */
+ struct {
+ __u32 exception;
+ __u32 error_code;
+ } ex;
+ /* KVM_EXIT_IO */
+ struct {
+#define KVM_EXIT_IO_IN 0
+#define KVM_EXIT_IO_OUT 1
+ __u8 direction;
+ __u8 size; /* bytes */
+ __u16 port;
+ __u32 count;
+ __u64 data_offset; /* relative to kvm_run start */
+ } io;
+ struct {
+ struct kvm_debug_exit_arch arch;
+ } debug;
+ /* KVM_EXIT_MMIO */
+ struct {
+ __u64 phys_addr;
+ __u8 data[8];
+ __u32 len;
+ __u8 is_write;
+ } mmio;
+ /* KVM_EXIT_HYPERCALL */
+ struct {
+ __u64 nr;
+ __u64 args[6];
+ __u64 ret;
+ __u32 longmode;
+ __u32 pad;
+ } hypercall;
+ /* KVM_EXIT_TPR_ACCESS */
+ struct {
+ __u64 rip;
+ __u32 is_write;
+ __u32 pad;
+ } tpr_access;
+ /* KVM_EXIT_S390_SIEIC */
+ struct {
+ __u8 icptcode;
+ __u16 ipa;
+ __u32 ipb;
+ } s390_sieic;
+ /* KVM_EXIT_S390_RESET */
+#define KVM_S390_RESET_POR 1
+#define KVM_S390_RESET_CLEAR 2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT 8
+#define KVM_S390_RESET_IPL 16
+ __u64 s390_reset_flags;
+ /* KVM_EXIT_DCR */
+ struct {
+ __u32 dcrn;
+ __u32 data;
+ __u8 is_write;
+ } dcr;
+ struct {
+ __u32 suberror;
+ /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
+ __u32 ndata;
+ __u64 data[16];
+ } internal;
+ /* Fix the size of the union. */
+ char padding[256];
+ };
+};
+
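+/*
+ * Illustrative sketch (not part of this header): struct kvm_run lives in
+ * the memory obtained by mmap()ing the vcpu fd; after KVM_RUN returns, a
+ * KVM_EXIT_IO exit places its data io.data_offset bytes into that same
+ * mapping (mmap_size, vcpu_fd and handle() are placeholders).
+ *
+ *   struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
+ *                              MAP_SHARED, vcpu_fd, 0);
+ *   ioctl(vcpu_fd, KVM_RUN, 0);
+ *   if (run->exit_reason == KVM_EXIT_IO) {
+ *           void *data = (char *)run + run->io.data_offset;
+ *           handle(data, run->io.size, run->io.count);
+ *   }
+ */
+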
+/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
+
+struct kvm_coalesced_mmio_zone {
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+};
+
+struct kvm_coalesced_mmio {
+ __u64 phys_addr;
+ __u32 len;
+ __u32 pad;
+ __u8 data[8];
+};
+
+struct kvm_coalesced_mmio_ring {
+ __u32 first, last;
+ struct kvm_coalesced_mmio coalesced_mmio[0];
+};
+
+#define KVM_COALESCED_MMIO_MAX \
+ ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
+ sizeof(struct kvm_coalesced_mmio))
+
+/* for KVM_TRANSLATE */
+struct kvm_translation {
+ /* in */
+ __u64 linear_address;
+
+ /* out */
+ __u64 physical_address;
+ __u8 valid;
+ __u8 writeable;
+ __u8 usermode;
+ __u8 pad[5];
+};
+
+/* for KVM_INTERRUPT */
+struct kvm_interrupt {
+ /* in */
+ __u32 irq;
+};
+
+/* for KVM_GET_DIRTY_LOG */
+struct kvm_dirty_log {
+ __u32 slot;
+ __u32 padding1;
+ union {
+ void *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
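+/*
+ * Illustrative sketch (not part of this header): fetch the dirty bitmap of
+ * memory slot 0; the caller sizes the bitmap at one bit per page of the
+ * slot (vm_fd and bitmap are placeholders).
+ *
+ *   struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
+ *   ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
+ */
+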
+/* for KVM_SET_SIGNAL_MASK */
+struct kvm_signal_mask {
+ __u32 len;
+ __u8 sigset[0];
+};
+
+/* for KVM_TPR_ACCESS_REPORTING */
+struct kvm_tpr_access_ctl {
+ __u32 enabled;
+ __u32 flags;
+ __u32 reserved[8];
+};
+
+/* for KVM_SET_VAPIC_ADDR */
+struct kvm_vapic_addr {
+ __u64 vapic_addr;
+};
+
+/* for KVM_SET_MPSTATE */
+
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
+#define KVM_MP_STATE_SIPI_RECEIVED 4
+
+struct kvm_mp_state {
+ __u32 mp_state;
+};
+
+struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP 0xfffe0000u
+#define KVM_S390_PROGRAM_INT 0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_INT_VIRTIO 0xffff2603u
+#define KVM_S390_INT_SERVICE 0xffff2401u
+#define KVM_S390_INT_EMERGENCY 0xffff1201u
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+
+#define KVM_GUESTDBG_ENABLE 0x00000001
+#define KVM_GUESTDBG_SINGLESTEP 0x00000002
+
+struct kvm_guest_debug {
+ __u32 control;
+ __u32 pad;
+ struct kvm_guest_debug_arch arch;
+};
+
+enum {
+ kvm_ioeventfd_flag_nr_datamatch,
+ kvm_ioeventfd_flag_nr_pio,
+ kvm_ioeventfd_flag_nr_deassign,
+ kvm_ioeventfd_flag_nr_max,
+};
+
+#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
+#define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio)
+#define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign)
+
+#define KVM_IOEVENTFD_VALID_FLAG_MASK ((1 << kvm_ioeventfd_flag_nr_max) - 1)
+
+struct kvm_ioeventfd {
+ __u64 datamatch;
+ __u64 addr; /* legal pio/mmio address */
+ __u32 len; /* 1, 2, 4, or 8 bytes */
+ __s32 fd;
+ __u32 flags;
+ __u8 pad[36];
+};
+
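+/*
+ * Illustrative sketch (not part of this header): have an eventfd signalled
+ * whenever the guest writes the value 1 to I/O port 0xf4 (vm_fd is a
+ * placeholder).
+ *
+ *   struct kvm_ioeventfd ioe = {
+ *           .datamatch = 1,
+ *           .addr = 0xf4,
+ *           .len = 1,
+ *           .fd = eventfd(0, 0),
+ *           .flags = KVM_IOEVENTFD_FLAG_PIO | KVM_IOEVENTFD_FLAG_DATAMATCH,
+ *   };
+ *   ioctl(vm_fd, KVM_IOEVENTFD, &ioe);
+ */
+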
+#define KVMIO 0xAE
+
+/*
+ * ioctls for /dev/kvm fds:
+ */
+#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
+#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
+#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
+
+#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
+/*
+ * Check if a kvm extension is available. Argument is extension number,
+ * return is 1 (yes) or 0 (no, sorry).
+ */
+#define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03)
+/*
+ * Get size for mmap(vcpu_fd)
+ */
+#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
+#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
+#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
+#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
+#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
+
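+/*
+ * Illustrative usage sketch (not part of this header): the usual handshake
+ * with /dev/kvm before any VM is created.
+ *
+ *   int kvm = open("/dev/kvm", O_RDWR);
+ *   if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
+ *           return -1;
+ *   if (!ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY))
+ *           return -1;
+ *   int mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
+ *   int vm_fd = ioctl(kvm, KVM_CREATE_VM, 0);
+ */
+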
+/*
+ * Extension capability list.
+ */
+#define KVM_CAP_IRQCHIP 0
+#define KVM_CAP_HLT 1
+#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
+#define KVM_CAP_USER_MEMORY 3
+#define KVM_CAP_SET_TSS_ADDR 4
+#define KVM_CAP_VAPIC 6
+#define KVM_CAP_EXT_CPUID 7
+#define KVM_CAP_CLOCKSOURCE 8
+#define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */
+#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
+#define KVM_CAP_PIT 11
+#define KVM_CAP_NOP_IO_DELAY 12
+#define KVM_CAP_PV_MMU 13
+#define KVM_CAP_MP_STATE 14
+#define KVM_CAP_COALESCED_MMIO 15
+#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
+#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+#define KVM_CAP_DEVICE_ASSIGNMENT 17
+#endif
+#define KVM_CAP_IOMMU 18
+#ifdef __KVM_HAVE_MSI
+#define KVM_CAP_DEVICE_MSI 20
+#endif
+/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
+#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
+#ifdef __KVM_HAVE_USER_NMI
+#define KVM_CAP_USER_NMI 22
+#endif
+#ifdef __KVM_HAVE_GUEST_DEBUG
+#define KVM_CAP_SET_GUEST_DEBUG 23
+#endif
+#ifdef __KVM_HAVE_PIT
+#define KVM_CAP_REINJECT_CONTROL 24
+#endif
+#ifdef __KVM_HAVE_IOAPIC
+#define KVM_CAP_IRQ_ROUTING 25
+#endif
+#define KVM_CAP_IRQ_INJECT_STATUS 26
+#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+#define KVM_CAP_DEVICE_DEASSIGNMENT 27
+#endif
+#ifdef __KVM_HAVE_MSIX
+#define KVM_CAP_DEVICE_MSIX 28
+#endif
+#define KVM_CAP_ASSIGN_DEV_IRQ 29
+/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
+#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
+#ifdef __KVM_HAVE_MCE
+#define KVM_CAP_MCE 31
+#endif
+#define KVM_CAP_IRQFD 32
+#ifdef __KVM_HAVE_PIT
+#define KVM_CAP_PIT2 33
+#endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
+#ifdef __KVM_HAVE_PIT_STATE2
+#define KVM_CAP_PIT_STATE2 35
+#endif
+#define KVM_CAP_IOEVENTFD 36
+#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
+#ifdef __KVM_HAVE_XEN_HVM
+#define KVM_CAP_XEN_HVM 38
+#endif
+#define KVM_CAP_ADJUST_CLOCK 39
+#define KVM_CAP_INTERNAL_ERROR_DATA 40
+#ifdef __KVM_HAVE_VCPU_EVENTS
+#define KVM_CAP_VCPU_EVENTS 41
+#endif
+#define KVM_CAP_S390_PSW 42
+#define KVM_CAP_PPC_SEGSTATE 43
+
+#ifdef KVM_CAP_IRQ_ROUTING
+
+struct kvm_irq_routing_irqchip {
+ __u32 irqchip;
+ __u32 pin;
+};
+
+struct kvm_irq_routing_msi {
+ __u32 address_lo;
+ __u32 address_hi;
+ __u32 data;
+ __u32 pad;
+};
+
+/* gsi routing entry types */
+#define KVM_IRQ_ROUTING_IRQCHIP 1
+#define KVM_IRQ_ROUTING_MSI 2
+
+struct kvm_irq_routing_entry {
+ __u32 gsi;
+ __u32 type;
+ __u32 flags;
+ __u32 pad;
+ union {
+ struct kvm_irq_routing_irqchip irqchip;
+ struct kvm_irq_routing_msi msi;
+ __u32 pad[8];
+ } u;
+};
+
+struct kvm_irq_routing {
+ __u32 nr;
+ __u32 flags;
+ struct kvm_irq_routing_entry entries[0];
+};
+
+#endif
+
+#ifdef KVM_CAP_MCE
+/* x86 MCE */
+struct kvm_x86_mce {
+ __u64 status;
+ __u64 addr;
+ __u64 misc;
+ __u64 mcg_status;
+ __u8 bank;
+ __u8 pad1[7];
+ __u64 pad2[3];
+};
+#endif
+
+#ifdef KVM_CAP_XEN_HVM
+struct kvm_xen_hvm_config {
+ __u32 flags;
+ __u32 msr;
+ __u64 blob_addr_32;
+ __u64 blob_addr_64;
+ __u8 blob_size_32;
+ __u8 blob_size_64;
+ __u8 pad2[30];
+};
+#endif
+
+#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+struct kvm_irqfd {
+ __u32 fd;
+ __u32 gsi;
+ __u32 flags;
+ __u8 pad[20];
+};
+
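+/*
+ * Illustrative sketch (not part of this header): bind an eventfd to GSI 5
+ * so that signalling the fd injects the interrupt into the guest (vm_fd is
+ * a placeholder).
+ *
+ *   struct kvm_irqfd irqfd = { .fd = eventfd(0, 0), .gsi = 5 };
+ *   ioctl(vm_fd, KVM_IRQFD, &irqfd);
+ */
+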
+struct kvm_clock_data {
+ __u64 clock;
+ __u32 flags;
+ __u32 pad[9];
+};
+
+/*
+ * ioctls for VM fds
+ */
+#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
+/*
+ * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
+ */
+#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
+#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
+#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
+#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
+#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
+ struct kvm_userspace_memory_region)
+#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
+#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+/* Device model IOC */
+#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
+#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
+#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
+#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
+#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
+#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
+#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
+#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
+#define KVM_REGISTER_COALESCED_MMIO \
+ _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
+#define KVM_UNREGISTER_COALESCED_MMIO \
+ _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
+#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
+ struct kvm_assigned_pci_dev)
+#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
+/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
+#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
+#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
+#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
+#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
+ struct kvm_assigned_pci_dev)
+#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
+ struct kvm_assigned_msix_nr)
+#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
+ struct kvm_assigned_msix_entry)
+#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
+#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
+#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
+#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
+#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
+#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
+#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
+/* Available with KVM_CAP_PIT_STATE2 */
+#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
+#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
+
+/*
+ * ioctls for vcpu fds
+ */
+#define KVM_RUN _IO(KVMIO, 0x80)
+#define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs)
+#define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs)
+#define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs)
+#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
+#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
+#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
+/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
+#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
+#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
+#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
+#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
+#define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask)
+#define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu)
+#define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu)
+#define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state)
+#define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state)
+#define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2)
+#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
+/* Available with KVM_CAP_VAPIC */
+#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
+/* Available with KVM_CAP_VAPIC */
+#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
+/* valid for virtual machine (for floating interrupt) _and_ vcpu */
+#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
+/* store status for s390 */
+#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
+#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
+#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
+/* initial ipl psw for s390 */
+#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
+/* initial reset for s390 */
+#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
+#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
+#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
+/* Available with KVM_CAP_NMI */
+#define KVM_NMI _IO(KVMIO, 0x9a)
+/* Available with KVM_CAP_SET_GUEST_DEBUG */
+#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
+/* MCE for x86 */
+#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
+#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
+#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
+/* IA64 stack access */
+#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
+#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
+/* Available with KVM_CAP_VCPU_EVENTS */
+#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
+#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
+
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+
+struct kvm_assigned_pci_dev {
+ __u32 assigned_dev_id;
+ __u32 busnr;
+ __u32 devfn;
+ __u32 flags;
+ union {
+ __u32 reserved[12];
+ };
+};
+
+#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
+#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
+#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
+
+#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
+#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
+#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
+
+#define KVM_DEV_IRQ_HOST_MASK 0x00ff
+#define KVM_DEV_IRQ_GUEST_MASK 0xff00
+
+struct kvm_assigned_irq {
+ __u32 assigned_dev_id;
+ __u32 host_irq;
+ __u32 guest_irq;
+ __u32 flags;
+ union {
+ struct {
+ __u32 addr_lo;
+ __u32 addr_hi;
+ __u32 data;
+ } guest_msi;
+ __u32 reserved[12];
+ };
+};
+
+
+struct kvm_assigned_msix_nr {
+ __u32 assigned_dev_id;
+ __u16 entry_nr;
+ __u16 padding;
+};
+
+#define KVM_MAX_MSIX_PER_DEV 256
+struct kvm_assigned_msix_entry {
+ __u32 assigned_dev_id;
+ __u32 gsi;
+ __u16 entry; /* The index of entry in the MSI-X table */
+ __u16 padding[3];
+};
+
+#endif /* __LINUX_KVM_H */
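A short, hedged sketch of how userspace might drive the VM-fd and vcpu-fd ioctls defined above. The helper name run_first_vcpu and the variable vm_fd are illustrative only; vm_fd is assumed to have been obtained from a prior KVM_CREATE_VM ioctl on /dev/kvm, which is defined earlier in this header and not shown in this hunk. Error handling is minimal.

/* Hedged sketch, not part of the patch: create vcpu 0, touch its registers,
 * and enter the guest once. vm_fd comes from KVM_CREATE_VM (assumption). */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_first_vcpu(int vm_fd)
{
	struct kvm_regs regs;
	int vcpu_fd;

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu slot 0, returns a vcpu fd */
	if (vcpu_fd < 0)
		return -1;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)	/* read initial register state */
		return -1;
	regs.rflags = 0x2;				/* bit 1 of RFLAGS is always set */
	if (ioctl(vcpu_fd, KVM_SET_REGS, &regs) < 0)
		return -1;

	return ioctl(vcpu_fd, KVM_RUN, 0);		/* enter the guest */
}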
diff --git a/kvm/include/linux/kvm_para.h b/kvm/include/linux/kvm_para.h
new file mode 100644
index 000000000..d73109243
--- /dev/null
+++ b/kvm/include/linux/kvm_para.h
@@ -0,0 +1,41 @@
+#ifndef __LINUX_KVM_PARA_H
+#define __LINUX_KVM_PARA_H
+
+/*
+ * This header file provides a method for making a hypercall to the host.
+ * Architectures should define:
+ * - kvm_hypercall0, kvm_hypercall1...
+ * - kvm_arch_para_features
+ * - kvm_para_available
+ */
+
+/* Return values for hypercalls */
+#define KVM_ENOSYS 1000
+#define KVM_EFAULT EFAULT
+#define KVM_E2BIG E2BIG
+#define KVM_EPERM EPERM
+
+#define KVM_HC_VAPIC_POLL_IRQ 1
+#define KVM_HC_MMU_OP 2
+
+/*
+ * hypercalls use architecture-specific definitions
+ */
+#include <asm/kvm_para.h>
+
+#ifdef __KERNEL__
+#ifdef CONFIG_KVM_GUEST
+void __init kvm_guest_init(void);
+#else
+#define kvm_guest_init() do { } while (0)
+#endif
+
+static inline int kvm_para_has_feature(unsigned int feature)
+{
+ if (kvm_arch_para_features() & (1UL << feature))
+ return 1;
+ return 0;
+}
+#endif /* __KERNEL__ */
+#endif /* __LINUX_KVM_PARA_H */
+
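A minimal, hedged sketch of how guest kernel code is expected to use kvm_para_has_feature() after kvm_para_available(). The helper name guest_has_kvmclock is hypothetical; KVM_FEATURE_CLOCKSOURCE comes from the x86 <asm/kvm_para.h> added later in this patch, and a __KERNEL__ context is assumed.

/* Hedged sketch, not part of the patch: guest-side feature probing. */
#include <linux/kvm_para.h>

static int guest_has_kvmclock(void)
{
	if (!kvm_para_available())
		return 0;
	/* feature bit from the per-architecture <asm/kvm_para.h> */
	return kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE);
}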
diff --git a/kvm/include/powerpc/asm/kvm.h b/kvm/include/powerpc/asm/kvm.h
new file mode 100644
index 000000000..bb2de6aa5
--- /dev/null
+++ b/kvm/include/powerpc/asm/kvm.h
@@ -0,0 +1,62 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __LINUX_KVM_POWERPC_H
+#define __LINUX_KVM_POWERPC_H
+
+#include <linux/types.h>
+
+struct kvm_regs {
+ __u64 pc;
+ __u64 cr;
+ __u64 ctr;
+ __u64 lr;
+ __u64 xer;
+ __u64 msr;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 pid;
+
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+
+ __u64 gpr[32];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+ __u64 fpr[32];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/kvm/include/powerpc/asm/kvm_para.h b/kvm/include/powerpc/asm/kvm_para.h
new file mode 100644
index 000000000..2d48f6a63
--- /dev/null
+++ b/kvm/include/powerpc/asm/kvm_para.h
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#ifdef __KERNEL__
+
+static inline int kvm_para_available(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/kvm/include/x86/asm/kvm.h b/kvm/include/x86/asm/kvm.h
new file mode 100644
index 000000000..f46b79f6c
--- /dev/null
+++ b/kvm/include/x86/asm/kvm.h
@@ -0,0 +1,287 @@
+#ifndef _ASM_X86_KVM_H
+#define _ASM_X86_KVM_H
+
+/*
+ * KVM x86 specific structures and definitions
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Select x86 specific features in <linux/kvm.h> */
+#define __KVM_HAVE_PIT
+#define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_DEVICE_ASSIGNMENT
+#define __KVM_HAVE_MSI
+#define __KVM_HAVE_USER_NMI
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_MSIX
+#define __KVM_HAVE_MCE
+#define __KVM_HAVE_PIT_STATE2
+#define __KVM_HAVE_XEN_HVM
+#define __KVM_HAVE_VCPU_EVENTS
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+struct kvm_memory_alias {
+ __u32 slot; /* this has a different namespace than memory slots */
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size;
+ __u64 target_phys_addr;
+};
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+ __u8 last_irr; /* edge detection */
+ __u8 irr; /* interrupt request register */
+ __u8 imr; /* interrupt mask register */
+ __u8 isr; /* interrupt service register */
+ __u8 priority_add; /* highest irq priority */
+ __u8 irq_base;
+ __u8 read_reg_select;
+ __u8 poll;
+ __u8 special_mask;
+ __u8 init_state;
+ __u8 auto_eoi;
+ __u8 rotate_on_auto_eoi;
+ __u8 special_fully_nested_mode;
+ __u8 init4; /* true if 4 byte init */
+ __u8 elcr; /* PIIX edge/trigger selection */
+ __u8 elcr_mask;
+};
+
+#define KVM_IOAPIC_NUM_PINS 24
+struct kvm_ioapic_state {
+ __u64 base_address;
+ __u32 ioregsel;
+ __u32 id;
+ __u32 irr;
+ __u32 pad;
+ union {
+ __u64 bits;
+ struct {
+ __u8 vector;
+ __u8 delivery_mode:3;
+ __u8 dest_mode:1;
+ __u8 delivery_status:1;
+ __u8 polarity:1;
+ __u8 remote_irr:1;
+ __u8 trig_mode:1;
+ __u8 mask:1;
+ __u8 reserve:7;
+ __u8 reserved[4];
+ __u8 dest_id;
+ } fields;
+ } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER 0
+#define KVM_IRQCHIP_PIC_SLAVE 1
+#define KVM_IRQCHIP_IOAPIC 2
+#define KVM_NR_IRQCHIPS 3
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 rax, rbx, rcx, rdx;
+ __u64 rsi, rdi, rsp, rbp;
+ __u64 r8, r9, r10, r11;
+ __u64 r12, r13, r14, r15;
+ __u64 rip, rflags;
+};
+
+/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
+#define KVM_APIC_REG_SIZE 0x400
+struct kvm_lapic_state {
+ char regs[KVM_APIC_REG_SIZE];
+};
+
+struct kvm_segment {
+ __u64 base;
+ __u32 limit;
+ __u16 selector;
+ __u8 type;
+ __u8 present, dpl, db, s, l, g, avl;
+ __u8 unusable;
+ __u8 padding;
+};
+
+struct kvm_dtable {
+ __u64 base;
+ __u16 limit;
+ __u16 padding[3];
+};
+
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+ /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+ __u8 fpr[8][16];
+ __u16 fcw;
+ __u16 fsw;
+ __u8 ftwx; /* in fxsave format */
+ __u8 pad1;
+ __u16 last_opcode;
+ __u64 last_ip;
+ __u64 last_dp;
+ __u8 xmm[16][16];
+ __u32 mxcsr;
+ __u32 pad2;
+};
+
+struct kvm_msr_entry {
+ __u32 index;
+ __u32 reserved;
+ __u64 data;
+};
+
+/* for KVM_GET_MSRS and KVM_SET_MSRS */
+struct kvm_msrs {
+ __u32 nmsrs; /* number of msrs in entries */
+ __u32 pad;
+
+ struct kvm_msr_entry entries[0];
+};
+
+/* for KVM_GET_MSR_INDEX_LIST */
+struct kvm_msr_list {
+ __u32 nmsrs; /* number of msrs in entries */
+ __u32 indices[0];
+};
+
+
+struct kvm_cpuid_entry {
+ __u32 function;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding;
+};
+
+/* for KVM_SET_CPUID */
+struct kvm_cpuid {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry entries[0];
+};
+
+struct kvm_cpuid_entry2 {
+ __u32 function;
+ __u32 index;
+ __u32 flags;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding[3];
+};
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
+#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
+#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
+
+/* for KVM_SET_CPUID2 */
+struct kvm_cpuid2 {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry2 entries[0];
+};
+
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+ __u32 count; /* can be 65536 */
+ __u16 latched_count;
+ __u8 count_latched;
+ __u8 status_latched;
+ __u8 status;
+ __u8 read_state;
+ __u8 write_state;
+ __u8 write_latch;
+ __u8 rw_mode;
+ __u8 mode;
+ __u8 bcd;
+ __u8 gate;
+ __s64 count_load_time;
+};
+
+struct kvm_debug_exit_arch {
+ __u32 exception;
+ __u32 pad;
+ __u64 pc;
+ __u64 dr6;
+ __u64 dr7;
+};
+
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+#define KVM_GUESTDBG_USE_HW_BP 0x00020000
+#define KVM_GUESTDBG_INJECT_DB 0x00040000
+#define KVM_GUESTDBG_INJECT_BP 0x00080000
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+ __u64 debugreg[8];
+};
+
+struct kvm_pit_state {
+ struct kvm_pit_channel_state channels[3];
+};
+
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+
+struct kvm_pit_state2 {
+ struct kvm_pit_channel_state channels[3];
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+struct kvm_reinject_control {
+ __u8 pit_reinject;
+ __u8 reserved[31];
+};
+
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
+
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 injected;
+ __u8 nr;
+ __u8 has_error_code;
+ __u8 pad;
+ __u32 error_code;
+ } exception;
+ struct {
+ __u8 injected;
+ __u8 nr;
+ __u8 soft;
+ __u8 pad;
+ } interrupt;
+ struct {
+ __u8 injected;
+ __u8 pending;
+ __u8 masked;
+ __u8 pad;
+ } nmi;
+ __u32 sipi_vector;
+ __u32 flags;
+ __u32 reserved[10];
+};
+
+#endif /* _ASM_X86_KVM_H */
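struct kvm_cpuid2 above ends in a flexible entries[] array, so userspace sizes the allocation by hand before calling KVM_SET_CPUID2. A hedged sketch; the helper name set_one_cpuid_leaf, the single vendor-string leaf, and vcpu_fd (from KVM_CREATE_VCPU) are illustrative assumptions, not part of this patch.

/* Hedged sketch: build a one-entry kvm_cpuid2 and hand it to the vcpu. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_one_cpuid_leaf(int vcpu_fd)
{
	struct kvm_cpuid2 *cpuid;
	int ret;

	cpuid = calloc(1, sizeof(*cpuid) + sizeof(struct kvm_cpuid_entry2));
	if (!cpuid)
		return -1;
	cpuid->nent = 1;
	cpuid->entries[0].function = 0;			/* leaf 0: max leaf + vendor string */
	cpuid->entries[0].eax = 1;			/* illustrative value */
	memcpy(&cpuid->entries[0].ebx, "Genu", 4);
	memcpy(&cpuid->entries[0].edx, "ineI", 4);
	memcpy(&cpuid->entries[0].ecx, "ntel", 4);

	ret = ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
	free(cpuid);
	return ret;
}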
diff --git a/kvm/include/x86/asm/kvm_para.h b/kvm/include/x86/asm/kvm_para.h
new file mode 100644
index 000000000..c584076a4
--- /dev/null
+++ b/kvm/include/x86/asm/kvm_para.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_X86_KVM_PARA_H
+#define _ASM_X86_KVM_PARA_H
+
+#include <linux/types.h>
+
+/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
+ * should be used to determine that a VM is running under KVM.
+ */
+#define KVM_CPUID_SIGNATURE 0x40000000
+
+/* This CPUID returns a feature bitmap in eax. Before enabling a particular
+ * paravirtualization, the appropriate feature bit should be checked.
+ */
+#define KVM_CPUID_FEATURES 0x40000001
+#define KVM_FEATURE_CLOCKSOURCE 0
+#define KVM_FEATURE_NOP_IO_DELAY 1
+#define KVM_FEATURE_MMU_OP 2
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MAX_MMU_OP_BATCH 32
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE 1
+#define KVM_MMU_OP_FLUSH_TLB 2
+#define KVM_MMU_OP_RELEASE_PT 3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+ __u32 op;
+ __u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+ struct kvm_mmu_op_header header;
+ __u64 pte_phys;
+ __u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+ struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+ struct kvm_mmu_op_header header;
+ __u64 pt_phys;
+};
+
+#ifdef __KERNEL__
+#include <asm/processor.h>
+
+extern void kvmclock_init(void);
+
+
+/* This instruction is vmcall. On non-VT architectures, it will generate a
+ * trap that we will then rewrite to the appropriate instruction.
+ */
+#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+
+/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
+ * instruction. The hypervisor may replace it with something else but only the
+ * instructions are guaranteed to be supported.
+ *
+ * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
+ * The hypercall number should be placed in rax and the return value will be
+ * placed in rax. No other registers will be clobbered unless explicitly
+ * noted by the particular hypercall.
+ */
+
+static inline long kvm_hypercall0(unsigned int nr)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
+ : "memory");
+ return ret;
+}
+
+static inline int kvm_para_available(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ char signature[13];
+
+ cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
+ memcpy(signature + 0, &ebx, 4);
+ memcpy(signature + 4, &ecx, 4);
+ memcpy(signature + 8, &edx, 4);
+ signature[12] = 0;
+
+ if (strcmp(signature, "KVMKVMKVM") == 0)
+ return 1;
+
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return cpuid_eax(KVM_CPUID_FEATURES);
+}
+
+#endif
+
+#endif /* _ASM_X86_KVM_PARA_H */
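The kvm_hypercall*() helpers above follow the register convention described in the comment (number in rax, arguments in rbx/rcx/rdx/rsi, result back in rax). A hedged guest-side sketch; the helper name poll_vapic_irq and the -KVM_ENOSYS fallback are illustrative assumptions, and a guest kernel context is assumed.

/* Hedged sketch: issue hypercall nr 1 (KVM_HC_VAPIC_POLL_IRQ), no arguments. */
#include <linux/kvm_para.h>

static long poll_vapic_irq(void)
{
	if (!kvm_para_available())
		return -KVM_ENOSYS;	/* illustrative error convention */
	return kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
}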
diff --git a/kvm/kvm.spec b/kvm/kvm.spec
new file mode 100644
index 000000000..92acb0ed2
--- /dev/null
+++ b/kvm/kvm.spec
@@ -0,0 +1,139 @@
+Name: kvm
+Version: 0.0
+Release: 0
+Summary: Kernel Virtual Machine virtualization environment
+
+Group: System Environment/Kernel
+License: GPL
+URL: http://www.qumranet.com
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
+
+ExclusiveArch: i386 x86_64 ia64
+
+Requires: kvm-kmod bridge-utils
+
+%define Distribution %(rpm -q -qf /etc/redhat-release --qf '%%{name}' | cut -d"-" -f 1)
+%define os_version %(rpm -q --qf '%%{version}' %{Distribution}-release)
+%define os_release %(rpm -q --qf '%%{release}' %{Distribution}-release | cut -d"." -f 1)
+
+%if %([ x"%{Distribution}" = x"fedora" -a x"%{os_version}" = x"5" ] && echo 1 || echo 0)
+%define require_gccver 32
+%endif
+
+%if %([ x"%{Distribution}" = x"fedora" -a 0"%{os_version}" -ge "8" ] && echo 1 || echo 0)
+%define qemuldflags --qemu-ldflags=-Wl,--build-id
+%else
+%define qemuldflags ""
+%endif
+
+%if %([ x"%{Distribution}" = x"centos" -a x"%{os_version}" = x"4" ] && echo 1 || echo 0)
+%define require_gccver 32
+%endif
+
+%if %([ x"%{Distribution}" = x"redhat" -a x"%{os_release}" = x"5" ] && echo 1 || echo 0)
+%define require_gccver 34
+%endif
+
+%if %( [ x"%{require_gccver}" = x"32" ] && echo 1 || echo 0)
+BuildRequires: compat-gcc-32
+%else
+BuildRequires: compat-gcc-34
+%endif
+
+BuildRequires: SDL-devel zlib-devel alsa-lib-devel
+
+%define _prebuilt %{?prebuilt:1}%{!?prebuilt:0}
+
+%if !%{_prebuilt}
+Source0: kvm.tar.gz
+Source1: user.tar.gz
+Source2: kernel.tar.gz
+Source3: scripts.tar.gz
+Source4: Makefile
+Source5: configure
+Source6: kvm_stat
+Source7: libkvm.tar.gz
+Source8: extboot.tar.gz
+%endif
+
+%description
+The Kernel Virtual Machine provides a virtualization environment for processors
+with hardware support for virtualization: Intel's VT-x & VT-i and AMD's AMD-V.
+
+%prep
+
+%if !%{_prebuilt}
+%setup -T -b 0 -n qemu
+%setup -T -b 1 -n user -D
+%setup -T -b 2 -n kernel -D
+%setup -T -b 7 -n libkvm -D
+%setup -T -b 3 -n scripts -D
+%setup -T -b 8 -n extboot -D
+cd ..
+cp %{_sourcedir}/Makefile %{_sourcedir}/configure %{_sourcedir}/kvm_stat .
+%endif
+
+%build
+
+rm -rf %{buildroot}
+
+%if !%{_prebuilt}
+cd ..
+./configure --prefix=/usr/kvm %{qemuldflags}
+make -C libkvm
+make -C user
+%ifarch i386 x86_64
+make extboot
+%endif
+#(cd qemu;
+# ./co
+# kpath="$(readlink -f ../kernel/include)"
+# upath="$(readlink -f ../user)"
+# ./configure --target-list=$(uname -i)-softmmu \
+# --extra-cflags="-I$kpath -I$upath" \
+# --extra-ldflags="-L$upath" \
+# --disable-kqemu --enable-kvm --prefix=/usr/kvm
+#)
+make -C qemu
+%endif
+
+%install
+
+%if !%{_prebuilt}
+cd ..
+%else
+cd %{objdir}
+%endif
+
+make DESTDIR=%{buildroot} install-rpm
+
+%define bindir /usr/bin
+%define bin %{bindir}/kvm
+%define initdir /etc/init.d
+%define confdir /etc/kvm
+%define utilsdir /etc/kvm/utils
+
+%post
+/sbin/chkconfig --add kvm
+/sbin/chkconfig --level 2345 kvm on
+/sbin/chkconfig --level 16 kvm off
+/usr/sbin/groupadd -fg 444 kvm
+
+%preun
+if [ "$1" != 0 ]; then
+ /sbin/service kvm stop
+ /sbin/chkconfig --level 2345 kvm off
+ /sbin/chkconfig --del kvm
+fi
+
+%clean
+%{__rm} -rf %{buildroot}
+
+%files
+/usr/bin/kvm
+/usr/bin/kvm_stat
+%{confdir}/qemu-ifup
+%{initdir}/kvm
+/etc/udev/rules.d/*kvm*.rules
+/usr/kvm
+%changelog
diff --git a/kvm/kvm_stat b/kvm/kvm_stat
new file mode 100755
index 000000000..21aff5b68
--- /dev/null
+++ b/kvm/kvm_stat
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+
+import curses
+import sys, os, time, optparse
+
+class Stats:
+ def __init__(self, fields = None):
+ def wanted(key):
+ import re
+ if not fields:
+ return True
+ return re.match(fields, key) != None
+ self.base = '/sys/kernel/debug/kvm'
+ self.values = {}
+ for key in os.listdir(self.base):
+ if wanted(key):
+ self.values[key] = None
+ def get(self):
+ for key, oldval in self.values.iteritems():
+ newval = int(file(self.base + '/' + key).read())
+ newdelta = None
+ if oldval is not None:
+ newdelta = newval - oldval[0]
+ self.values[key] = (newval, newdelta)
+ return self.values
+
+if not os.access('/sys/kernel/debug', os.F_OK):
+ print 'Please enable CONFIG_DEBUG_FS in your kernel'
+ sys.exit(1)
+if not os.access('/sys/kernel/debug/kvm', os.F_OK):
+ print "Please mount debugfs ('mount -t debugfs debugfs /sys/kernel/debug')"
+ print "and ensure the kvm modules are loaded"
+ sys.exit(1)
+
+label_width = 20
+number_width = 10
+
+def tui(screen, stats):
+ curses.use_default_colors()
+ curses.noecho()
+ def refresh():
+ screen.erase()
+ screen.addstr(0, 0, 'kvm statistics')
+ row = 2
+ s = stats.get()
+ for key in sorted(s.keys()):
+ if row >= screen.getmaxyx()[0]:
+ break
+ values = s[key]
+ col = 1
+ screen.addstr(row, col, key)
+ col += label_width
+ screen.addstr(row, col, '%10d' % (values[0],))
+ col += number_width
+ if values[1] is not None:
+ screen.addstr(row, col, '%8d' % (values[1],))
+ row += 1
+ screen.refresh()
+
+ while True:
+ refresh()
+ curses.halfdelay(10)
+ try:
+ c = screen.getkey()
+ if c == 'q':
+ break
+ except KeyboardInterrupt:
+ break
+ except curses.error:
+ continue
+
+def batch(stats):
+ s = stats.get()
+ time.sleep(1)
+ s = stats.get()
+ for key in sorted(s.keys()):
+ values = s[key]
+ print '%-22s%10d%10d' % (key, values[0], values[1])
+
+def log(stats):
+ keys = sorted(stats.get().iterkeys())
+ def banner():
+ for k in keys:
+ print '%10s' % k[0:9],
+ print
+ def statline():
+ s = stats.get()
+ for k in keys:
+ print ' %9d' % s[k][1],
+ print
+ line = 0
+ banner_repeat = 20
+ while True:
+ time.sleep(1)
+ if line % banner_repeat == 0:
+ banner()
+ statline()
+ line += 1
+
+options = optparse.OptionParser()
+options.add_option('-1', '--once', '--batch',
+ action = 'store_true',
+ default = False,
+ dest = 'once',
+ help = 'run in batch mode for one second',
+ )
+options.add_option('-l', '--log',
+ action = 'store_true',
+ default = False,
+ dest = 'log',
+ help = 'run in logging mode (like vmstat)',
+ )
+options.add_option('-f', '--fields',
+ action = 'store',
+ default = None,
+ dest = 'fields',
+ help = 'fields to display (regex)',
+ )
+(options, args) = options.parse_args(sys.argv)
+
+stats = Stats(fields = options.fields)
+
+if options.log:
+ log(stats)
+elif not options.once:
+ import curses.wrapper
+ curses.wrapper(tui, stats)
+else:
+ batch(stats)
diff --git a/kvm/libfdt/Makefile b/kvm/libfdt/Makefile
new file mode 100644
index 000000000..db80e47a6
--- /dev/null
+++ b/kvm/libfdt/Makefile
@@ -0,0 +1,19 @@
+include ../config.mak
+include ../user/config.mak
+
+LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
+LIBFDT_INCLUDES = fdt.h libfdt.h
+LIBFDT_EXTRA = libfdt_internal.h
+LIBFDT_LIB = libfdt.a
+
+LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
+
+CFLAGS += -I .
+
+$(LIBFDT_LIB): $(LIBFDT_OBJS)
+ $(AR) rcs $@ $^
+
+all: $(LIBFDT_LIB)
+
+clean:
+ rm -rf *.o *.a
diff --git a/kvm/libfdt/README b/kvm/libfdt/README
new file mode 100644
index 000000000..491bc76ee
--- /dev/null
+++ b/kvm/libfdt/README
@@ -0,0 +1,3 @@
+libfdt was taken from the dtc source tree, which is the upstream source for
+libfdt. It can be found here:
+http://www.jdl.com/software/
diff --git a/kvm/libfdt/fdt.c b/kvm/libfdt/fdt.c
new file mode 100644
index 000000000..bd9171237
--- /dev/null
+++ b/kvm/libfdt/fdt.c
@@ -0,0 +1,194 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_check_header(const void *fdt)
+{
+ if (fdt_magic(fdt) == FDT_MAGIC) {
+ /* Complete tree */
+ if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
+ return -FDT_ERR_BADVERSION;
+ if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
+ return -FDT_ERR_BADVERSION;
+ } else if (fdt_magic(fdt) == SW_MAGIC) {
+ /* Unfinished sequential-write blob */
+ if (fdt_size_dt_struct(fdt) == 0)
+ return -FDT_ERR_BADSTATE;
+ } else {
+ return -FDT_ERR_BADMAGIC;
+ }
+
+ return 0;
+}
+
+const void *fdt_offset_ptr(const void *fdt, int offset, int len)
+{
+ const void *p;
+
+ if (fdt_version(fdt) >= 0x11)
+ if (((offset + len) < offset)
+ || ((offset + len) > fdt_size_dt_struct(fdt)))
+ return NULL;
+
+ p = _fdt_offset_ptr(fdt, offset);
+
+ if (p + len < p)
+ return NULL;
+ return p;
+}
+
+uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset)
+{
+ const uint32_t *tagp, *lenp;
+ uint32_t tag;
+ const char *p;
+
+ if (offset % FDT_TAGSIZE)
+ return -1;
+
+ tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
+ if (! tagp)
+ return FDT_END; /* premature end */
+ tag = fdt32_to_cpu(*tagp);
+ offset += FDT_TAGSIZE;
+
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ /* skip name */
+ do {
+ p = fdt_offset_ptr(fdt, offset++, 1);
+ } while (p && (*p != '\0'));
+ if (! p)
+ return FDT_END;
+ break;
+ case FDT_PROP:
+ lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
+ if (! lenp)
+ return FDT_END;
+ /* skip name offset, length and value */
+ offset += 2*FDT_TAGSIZE + fdt32_to_cpu(*lenp);
+ break;
+ }
+
+ if (nextoffset)
+ *nextoffset = ALIGN(offset, FDT_TAGSIZE);
+
+ return tag;
+}
+
+int fdt_next_node(const void *fdt, int offset, int *depth)
+{
+ int nextoffset = 0;
+ uint32_t tag;
+
+ if (offset >= 0) {
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+ if (tag != FDT_BEGIN_NODE)
+ return -FDT_ERR_BADOFFSET;
+ }
+
+ do {
+ offset = nextoffset;
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+ switch (tag) {
+ case FDT_PROP:
+ case FDT_NOP:
+ break;
+
+ case FDT_BEGIN_NODE:
+ if (depth)
+ (*depth)++;
+ break;
+
+ case FDT_END_NODE:
+ if (depth)
+ (*depth)--;
+ break;
+
+ case FDT_END:
+ return -FDT_ERR_NOTFOUND;
+
+ default:
+ return -FDT_ERR_BADSTRUCTURE;
+ }
+ } while (tag != FDT_BEGIN_NODE);
+
+ return offset;
+}
+
+const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
+{
+ int len = strlen(s) + 1;
+ const char *last = strtab + tabsize - len;
+ const char *p;
+
+ for (p = strtab; p <= last; p++)
+ if (memeq(p, s, len))
+ return p;
+ return NULL;
+}
+
+int fdt_move(const void *fdt, void *buf, int bufsize)
+{
+ CHECK_HEADER(fdt);
+
+ if (fdt_totalsize(fdt) > bufsize)
+ return -FDT_ERR_NOSPACE;
+
+ memmove(buf, fdt, fdt_totalsize(fdt));
+ return 0;
+}
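fdt_next_node() above walks the structure block while tracking nesting depth; starting it at offset -1 begins the walk at the root node. A hedged sketch of a whole-tree traversal; the function name dump_node_names is hypothetical, and fdt_get_name() is declared in libfdt.h (added elsewhere in this patch).

/* Hedged sketch: print every node name in a flat device tree blob. */
#include <stdio.h>
#include <libfdt.h>

static void dump_node_names(const void *fdt)
{
	int offset, depth = 0;

	for (offset = fdt_next_node(fdt, -1, &depth);
	     offset >= 0;
	     offset = fdt_next_node(fdt, offset, &depth)) {
		const char *name = fdt_get_name(fdt, offset, NULL);
		printf("%*s%s\n", depth * 2, "", name ? name : "<error>");
	}
}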
diff --git a/kvm/libfdt/fdt.h b/kvm/libfdt/fdt.h
new file mode 100644
index 000000000..48ccfd910
--- /dev/null
+++ b/kvm/libfdt/fdt.h
@@ -0,0 +1,60 @@
+#ifndef _FDT_H
+#define _FDT_H
+
+#ifndef __ASSEMBLY__
+
+struct fdt_header {
+ uint32_t magic; /* magic word FDT_MAGIC */
+ uint32_t totalsize; /* total size of DT block */
+ uint32_t off_dt_struct; /* offset to structure */
+ uint32_t off_dt_strings; /* offset to strings */
+ uint32_t off_mem_rsvmap; /* offset to memory reserve map */
+ uint32_t version; /* format version */
+ uint32_t last_comp_version; /* last compatible version */
+
+ /* version 2 fields below */
+ uint32_t boot_cpuid_phys; /* Which physical CPU id we're
+ booting on */
+ /* version 3 fields below */
+ uint32_t size_dt_strings; /* size of the strings block */
+
+ /* version 17 fields below */
+ uint32_t size_dt_struct; /* size of the structure block */
+};
+
+struct fdt_reserve_entry {
+ uint64_t address;
+ uint64_t size;
+};
+
+struct fdt_node_header {
+ uint32_t tag;
+ char name[0];
+};
+
+struct fdt_property {
+ uint32_t tag;
+ uint32_t len;
+ uint32_t nameoff;
+ char data[0];
+};
+
+#endif /* !__ASSEMBLY */
+
+#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */
+#define FDT_TAGSIZE sizeof(uint32_t)
+
+#define FDT_BEGIN_NODE 0x1 /* Start node: full name */
+#define FDT_END_NODE 0x2 /* End node */
+#define FDT_PROP 0x3 /* Property: name off,
+ size, content */
+#define FDT_NOP 0x4 /* nop */
+#define FDT_END 0x9
+
+#define FDT_V1_SIZE (7*sizeof(uint32_t))
+#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(uint32_t))
+#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(uint32_t))
+#define FDT_V16_SIZE FDT_V3_SIZE
+#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(uint32_t))
+
+#endif /* _FDT_H */
diff --git a/kvm/libfdt/fdt_ro.c b/kvm/libfdt/fdt_ro.c
new file mode 100644
index 000000000..63fa1290b
--- /dev/null
+++ b/kvm/libfdt/fdt_ro.c
@@ -0,0 +1,476 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int nodename_eq(const void *fdt, int offset,
+ const char *s, int len)
+{
+ const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);
+
+ if (! p)
+ /* short match */
+ return 0;
+
+ if (memcmp(p, s, len) != 0)
+ return 0;
+
+ if (p[len] == '\0')
+ return 1;
+ else if (!memchr(s, '@', len) && (p[len] == '@'))
+ return 1;
+ else
+ return 0;
+}
+
+const char *fdt_string(const void *fdt, int stroffset)
+{
+ return (char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
+}
+
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
+{
+ CHECK_HEADER(fdt);
+ *address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
+ *size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
+ return 0;
+}
+
+int fdt_num_mem_rsv(const void *fdt)
+{
+ int i = 0;
+
+ while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0)
+ i++;
+ return i;
+}
+
+int fdt_subnode_offset_namelen(const void *fdt, int offset,
+ const char *name, int namelen)
+{
+ int depth;
+
+ CHECK_HEADER(fdt);
+
+ for (depth = 0;
+ offset >= 0;
+ offset = fdt_next_node(fdt, offset, &depth)) {
+ if (depth < 0)
+ return -FDT_ERR_NOTFOUND;
+ else if ((depth == 1)
+ && nodename_eq(fdt, offset, name, namelen))
+ return offset;
+ }
+
+ return offset; /* error */
+}
+
+int fdt_subnode_offset(const void *fdt, int parentoffset,
+ const char *name)
+{
+ return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
+}
+
+int fdt_path_offset(const void *fdt, const char *path)
+{
+ const char *end = path + strlen(path);
+ const char *p = path;
+ int offset = 0;
+
+ CHECK_HEADER(fdt);
+
+ if (*path != '/')
+ return -FDT_ERR_BADPATH;
+
+ while (*p) {
+ const char *q;
+
+ while (*p == '/')
+ p++;
+ if (! *p)
+ return offset;
+ q = strchr(p, '/');
+ if (! q)
+ q = end;
+
+ offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
+ if (offset < 0)
+ return offset;
+
+ p = q;
+ }
+
+ return offset;
+}
+
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
+{
+ const struct fdt_node_header *nh;
+ int err;
+
+ if ((err = fdt_check_header(fdt)) != 0)
+ goto fail;
+
+ err = -FDT_ERR_BADOFFSET;
+ nh = fdt_offset_ptr(fdt, nodeoffset, sizeof(*nh));
+ if (!nh || (fdt32_to_cpu(nh->tag) != FDT_BEGIN_NODE))
+ goto fail;
+
+ if (len)
+ *len = strlen(nh->name);
+
+ return nh->name;
+
+ fail:
+ if (len)
+ *len = err;
+ return NULL;
+}
+
+const struct fdt_property *fdt_get_property(const void *fdt,
+ int nodeoffset,
+ const char *name, int *lenp)
+{
+ uint32_t tag;
+ const struct fdt_property *prop;
+ int namestroff;
+ int offset, nextoffset;
+ int err;
+
+ if ((err = fdt_check_header(fdt)) != 0)
+ goto fail;
+
+ err = -FDT_ERR_BADOFFSET;
+ if (nodeoffset % FDT_TAGSIZE)
+ goto fail;
+
+ tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
+ if (tag != FDT_BEGIN_NODE)
+ goto fail;
+
+ do {
+ offset = nextoffset;
+
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+ switch (tag) {
+ case FDT_END:
+ err = -FDT_ERR_TRUNCATED;
+ goto fail;
+
+ case FDT_BEGIN_NODE:
+ case FDT_END_NODE:
+ case FDT_NOP:
+ break;
+
+ case FDT_PROP:
+ err = -FDT_ERR_BADSTRUCTURE;
+ prop = fdt_offset_ptr(fdt, offset, sizeof(*prop));
+ if (! prop)
+ goto fail;
+ namestroff = fdt32_to_cpu(prop->nameoff);
+ if (streq(fdt_string(fdt, namestroff), name)) {
+ /* Found it! */
+ int len = fdt32_to_cpu(prop->len);
+ prop = fdt_offset_ptr(fdt, offset,
+ sizeof(*prop)+len);
+ if (! prop)
+ goto fail;
+
+ if (lenp)
+ *lenp = len;
+
+ return prop;
+ }
+ break;
+
+ default:
+ err = -FDT_ERR_BADSTRUCTURE;
+ goto fail;
+ }
+ } while ((tag != FDT_BEGIN_NODE) && (tag != FDT_END_NODE));
+
+ err = -FDT_ERR_NOTFOUND;
+ fail:
+ if (lenp)
+ *lenp = err;
+ return NULL;
+}
+
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+ const char *name, int *lenp)
+{
+ const struct fdt_property *prop;
+
+ prop = fdt_get_property(fdt, nodeoffset, name, lenp);
+ if (! prop)
+ return NULL;
+
+ return prop->data;
+}
+
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
+{
+ const uint32_t *php;
+ int len;
+
+ php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
+ if (!php || (len != sizeof(*php)))
+ return 0;
+
+ return fdt32_to_cpu(*php);
+}
+
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
+{
+ int pdepth = 0, p = 0;
+ int offset, depth, namelen;
+ const char *name;
+
+ CHECK_HEADER(fdt);
+
+ if (buflen < 2)
+ return -FDT_ERR_NOSPACE;
+
+ for (offset = 0, depth = 0;
+ (offset >= 0) && (offset <= nodeoffset);
+ offset = fdt_next_node(fdt, offset, &depth)) {
+ if (pdepth < depth)
+ continue; /* overflowed buffer */
+
+ while (pdepth > depth) {
+ do {
+ p--;
+ } while (buf[p-1] != '/');
+ pdepth--;
+ }
+
+ name = fdt_get_name(fdt, offset, &namelen);
+ if (!name)
+ return namelen;
+ if ((p + namelen + 1) <= buflen) {
+ memcpy(buf + p, name, namelen);
+ p += namelen;
+ buf[p++] = '/';
+ pdepth++;
+ }
+
+ if (offset == nodeoffset) {
+ if (pdepth < (depth + 1))
+ return -FDT_ERR_NOSPACE;
+
+ if (p > 1) /* special case so that root path is "/", not "" */
+ p--;
+ buf[p] = '\0';
+ return p;
+ }
+ }
+
+ if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+ return -FDT_ERR_BADOFFSET;
+ else if (offset == -FDT_ERR_BADOFFSET)
+ return -FDT_ERR_BADSTRUCTURE;
+
+ return offset; /* error from fdt_next_node() */
+}
+
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+ int supernodedepth, int *nodedepth)
+{
+ int offset, depth;
+ int supernodeoffset = -FDT_ERR_INTERNAL;
+
+ CHECK_HEADER(fdt);
+
+ if (supernodedepth < 0)
+ return -FDT_ERR_NOTFOUND;
+
+ for (offset = 0, depth = 0;
+ (offset >= 0) && (offset <= nodeoffset);
+ offset = fdt_next_node(fdt, offset, &depth)) {
+ if (depth == supernodedepth)
+ supernodeoffset = offset;
+
+ if (offset == nodeoffset) {
+ if (nodedepth)
+ *nodedepth = depth;
+
+ if (supernodedepth > depth)
+ return -FDT_ERR_NOTFOUND;
+ else
+ return supernodeoffset;
+ }
+ }
+
+ if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+ return -FDT_ERR_BADOFFSET;
+ else if (offset == -FDT_ERR_BADOFFSET)
+ return -FDT_ERR_BADSTRUCTURE;
+
+ return offset; /* error from fdt_next_node() */
+}
+
+int fdt_node_depth(const void *fdt, int nodeoffset)
+{
+ int nodedepth;
+ int err;
+
+ err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
+ if (err)
+ return (err < 0) ? err : -FDT_ERR_INTERNAL;
+ return nodedepth;
+}
+
+int fdt_parent_offset(const void *fdt, int nodeoffset)
+{
+ int nodedepth = fdt_node_depth(fdt, nodeoffset);
+
+ if (nodedepth < 0)
+ return nodedepth;
+ return fdt_supernode_atdepth_offset(fdt, nodeoffset,
+ nodedepth - 1, NULL);
+}
+
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+ const char *propname,
+ const void *propval, int proplen)
+{
+ int offset;
+ const void *val;
+ int len;
+
+ CHECK_HEADER(fdt);
+
+ /* FIXME: The algorithm here is pretty horrible: we scan each
+ * property of a node in fdt_getprop(), then if that didn't
+ * find what we want, we scan over them again making our way
+ * to the next node. Still it's the easiest to implement
+ * approach; performance can come later. */
+ for (offset = fdt_next_node(fdt, startoffset, NULL);
+ offset >= 0;
+ offset = fdt_next_node(fdt, offset, NULL)) {
+ val = fdt_getprop(fdt, offset, propname, &len);
+ if (val && (len == proplen)
+ && (memcmp(val, propval, len) == 0))
+ return offset;
+ }
+
+ return offset; /* error from fdt_next_node() */
+}
+
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
+{
+ if ((phandle == 0) || (phandle == -1))
+ return -FDT_ERR_BADPHANDLE;
+ phandle = cpu_to_fdt32(phandle);
+ return fdt_node_offset_by_prop_value(fdt, -1, "linux,phandle",
+ &phandle, sizeof(phandle));
+}
+
+int _stringlist_contains(const void *strlist, int listlen, const char *str)
+{
+ int len = strlen(str);
+ const void *p;
+
+ while (listlen >= len) {
+ if (memcmp(str, strlist, len+1) == 0)
+ return 1;
+ p = memchr(strlist, '\0', listlen);
+ if (!p)
+ return 0; /* malformed strlist.. */
+ listlen -= (p-strlist) + 1;
+ strlist = p + 1;
+ }
+ return 0;
+}
+
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+ const char *compatible)
+{
+ const void *prop;
+ int len;
+
+ prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
+ if (!prop)
+ return len;
+ if (_stringlist_contains(prop, len, compatible))
+ return 0;
+ else
+ return 1;
+}
+
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+ const char *compatible)
+{
+ int offset, err;
+
+ CHECK_HEADER(fdt);
+
+ /* FIXME: The algorithm here is pretty horrible: we scan each
+ * property of a node in fdt_node_check_compatible(), then if
+ * that didn't find what we want, we scan over them again
+ * making our way to the next node. Still it's the easiest to
+ * implement approach; performance can come later. */
+ for (offset = fdt_next_node(fdt, startoffset, NULL);
+ offset >= 0;
+ offset = fdt_next_node(fdt, offset, NULL)) {
+ err = fdt_node_check_compatible(fdt, offset, compatible);
+ if ((err < 0) && (err != -FDT_ERR_NOTFOUND))
+ return err;
+ else if (err == 0)
+ return offset;
+ }
+
+ return offset; /* error from fdt_next_node() */
+}
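A typical read-only lookup pairs fdt_path_offset() with fdt_getprop(), both defined above. A hedged sketch; the helper name print_bootargs and the /chosen/bootargs example property are illustrative assumptions about the blob's contents.

/* Hedged sketch: read a string property out of a device tree blob. */
#include <stdio.h>
#include <libfdt.h>

static void print_bootargs(const void *fdt)
{
	int node, len;
	const char *args;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return;				/* node not present or bad blob */
	args = fdt_getprop(fdt, node, "bootargs", &len);
	if (args)
		printf("bootargs (%d bytes): %s\n", len, args);
}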
diff --git a/kvm/libfdt/fdt_rw.c b/kvm/libfdt/fdt_rw.c
new file mode 100644
index 000000000..0df472bc5
--- /dev/null
+++ b/kvm/libfdt/fdt_rw.c
@@ -0,0 +1,467 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int _blocks_misordered(const void *fdt,
+ int mem_rsv_size, int struct_size)
+{
+ return (fdt_off_mem_rsvmap(fdt) < ALIGN(sizeof(struct fdt_header), 8))
+ || (fdt_off_dt_struct(fdt) <
+ (fdt_off_mem_rsvmap(fdt) + mem_rsv_size))
+ || (fdt_off_dt_strings(fdt) <
+ (fdt_off_dt_struct(fdt) + struct_size))
+ || (fdt_totalsize(fdt) <
+ (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt)));
+}
+
+static int rw_check_header(void *fdt)
+{
+ CHECK_HEADER(fdt);
+
+ if (fdt_version(fdt) < 17)
+ return -FDT_ERR_BADVERSION;
+ if (_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry),
+ fdt_size_dt_struct(fdt)))
+ return -FDT_ERR_BADLAYOUT;
+ if (fdt_version(fdt) > 17)
+ fdt_set_version(fdt, 17);
+
+ return 0;
+}
+
+#define RW_CHECK_HEADER(fdt) \
+ { \
+ int err; \
+ if ((err = rw_check_header(fdt)) != 0) \
+ return err; \
+ }
+
+static inline int _blob_data_size(void *fdt)
+{
+ return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
+}
+
+static int _blob_splice(void *fdt, void *p, int oldlen, int newlen)
+{
+ void *end = fdt + _blob_data_size(fdt);
+
+ if (((p + oldlen) < p) || ((p + oldlen) > end))
+ return -FDT_ERR_BADOFFSET;
+ if ((end - oldlen + newlen) > (fdt + fdt_totalsize(fdt)))
+ return -FDT_ERR_NOSPACE;
+ memmove(p + newlen, p + oldlen, end - p - oldlen);
+ return 0;
+}
+
+static int _blob_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p,
+ int oldn, int newn)
+{
+ int delta = (newn - oldn) * sizeof(*p);
+ int err;
+ err = _blob_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p));
+ if (err)
+ return err;
+ fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta);
+ fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
+ return 0;
+}
+
+static int _blob_splice_struct(void *fdt, void *p,
+ int oldlen, int newlen)
+{
+ int delta = newlen - oldlen;
+ int err;
+
+ if ((err = _blob_splice(fdt, p, oldlen, newlen)))
+ return err;
+
+ fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta);
+ fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
+ return 0;
+}
+
+static int _blob_splice_string(void *fdt, int newlen)
+{
+ void *p = fdt + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
+ int err;
+
+ if ((err = _blob_splice(fdt, p, 0, newlen)))
+ return err;
+
+ fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen);
+ return 0;
+}
+
+static int _find_add_string(void *fdt, const char *s)
+{
+ char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
+ const char *p;
+ char *new;
+ int len = strlen(s) + 1;
+ int err;
+
+ p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s);
+ if (p)
+ /* found it */
+ return (p - strtab);
+
+ new = strtab + fdt_size_dt_strings(fdt);
+ err = _blob_splice_string(fdt, len);
+ if (err)
+ return err;
+
+ memcpy(new, s, len);
+ return (new - strtab);
+}
+
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size)
+{
+ struct fdt_reserve_entry *re;
+ int err;
+
+ if ((err = rw_check_header(fdt)))
+ return err;
+
+ re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt));
+ err = _blob_splice_mem_rsv(fdt, re, 0, 1);
+ if (err)
+ return err;
+
+ re->address = cpu_to_fdt64(address);
+ re->size = cpu_to_fdt64(size);
+ return 0;
+}
+
+int fdt_del_mem_rsv(void *fdt, int n)
+{
+ struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n);
+ int err;
+
+ if ((err = rw_check_header(fdt)))
+ return err;
+ if (n >= fdt_num_mem_rsv(fdt))
+ return -FDT_ERR_NOTFOUND;
+
+ err = _blob_splice_mem_rsv(fdt, re, 1, 0);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int _resize_property(void *fdt, int nodeoffset, const char *name, int len,
+ struct fdt_property **prop)
+{
+ int oldlen;
+ int err;
+
+ *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
+ if (! (*prop))
+ return oldlen;
+
+ if ((err = _blob_splice_struct(fdt, (*prop)->data,
+ ALIGN(oldlen, FDT_TAGSIZE),
+ ALIGN(len, FDT_TAGSIZE))))
+ return err;
+
+ (*prop)->len = cpu_to_fdt32(len);
+ return 0;
+}
+
+static int _add_property(void *fdt, int nodeoffset, const char *name, int len,
+ struct fdt_property **prop)
+{
+ uint32_t tag;
+ int proplen;
+ int nextoffset;
+ int namestroff;
+ int err;
+
+ tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
+ if (tag != FDT_BEGIN_NODE)
+ return -FDT_ERR_BADOFFSET;
+
+ namestroff = _find_add_string(fdt, name);
+ if (namestroff < 0)
+ return namestroff;
+
+ *prop = _fdt_offset_ptr_w(fdt, nextoffset);
+ proplen = sizeof(**prop) + ALIGN(len, FDT_TAGSIZE);
+
+ err = _blob_splice_struct(fdt, *prop, 0, proplen);
+ if (err)
+ return err;
+
+ (*prop)->tag = cpu_to_fdt32(FDT_PROP);
+ (*prop)->nameoff = cpu_to_fdt32(namestroff);
+ (*prop)->len = cpu_to_fdt32(len);
+ return 0;
+}
+
+int fdt_set_name(void *fdt, int nodeoffset, const char *name)
+{
+ char *namep;
+ int oldlen, newlen;
+ int err;
+
+ if ((err = rw_check_header(fdt)))
+ return err;
+
+ namep = (char *)fdt_get_name(fdt, nodeoffset, &oldlen);
+ if (!namep)
+ return oldlen;
+
+ newlen = strlen(name);
+
+ err = _blob_splice_struct(fdt, namep, ALIGN(oldlen+1, FDT_TAGSIZE),
+ ALIGN(newlen+1, FDT_TAGSIZE));
+ if (err)
+ return err;
+
+ memcpy(namep, name, newlen+1);
+ return 0;
+}
+
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len)
+{
+ struct fdt_property *prop;
+ int err;
+
+ if ((err = rw_check_header(fdt)))
+ return err;
+
+ err = _resize_property(fdt, nodeoffset, name, len, &prop);
+ if (err == -FDT_ERR_NOTFOUND)
+ err = _add_property(fdt, nodeoffset, name, len, &prop);
+ if (err)
+ return err;
+
+ memcpy(prop->data, val, len);
+ return 0;
+}
+
+int fdt_delprop(void *fdt, int nodeoffset, const char *name)
+{
+ struct fdt_property *prop;
+ int len, proplen;
+
+ RW_CHECK_HEADER(fdt);
+
+ prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
+ if (! prop)
+ return len;
+
+ proplen = sizeof(*prop) + ALIGN(len, FDT_TAGSIZE);
+ return _blob_splice_struct(fdt, prop, proplen, 0);
+}
+
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+ const char *name, int namelen)
+{
+ struct fdt_node_header *nh;
+ int offset, nextoffset;
+ int nodelen;
+ int err;
+ uint32_t tag;
+ uint32_t *endtag;
+
+ RW_CHECK_HEADER(fdt);
+
+ offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen);
+ if (offset >= 0)
+ return -FDT_ERR_EXISTS;
+ else if (offset != -FDT_ERR_NOTFOUND)
+ return offset;
+
+ /* Try to place the new node after the parent's properties */
+ fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */
+ do {
+ offset = nextoffset;
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+ } while ((tag == FDT_PROP) || (tag == FDT_NOP));
+
+ nh = _fdt_offset_ptr_w(fdt, offset);
+ nodelen = sizeof(*nh) + ALIGN(namelen+1, FDT_TAGSIZE) + FDT_TAGSIZE;
+
+ err = _blob_splice_struct(fdt, nh, 0, nodelen);
+ if (err)
+ return err;
+
+ nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
+ memset(nh->name, 0, ALIGN(namelen+1, FDT_TAGSIZE));
+ memcpy(nh->name, name, namelen);
+ endtag = (uint32_t *)((void *)nh + nodelen - FDT_TAGSIZE);
+ *endtag = cpu_to_fdt32(FDT_END_NODE);
+
+ return offset;
+}
+
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name)
+{
+ return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name));
+}
+
+int fdt_del_node(void *fdt, int nodeoffset)
+{
+ int endoffset;
+
+ RW_CHECK_HEADER(fdt);
+
+ endoffset = _fdt_node_end_offset(fdt, nodeoffset);
+ if (endoffset < 0)
+ return endoffset;
+
+ return _blob_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset),
+ endoffset - nodeoffset, 0);
+}
+
+static void _packblocks(const void *fdt, void *buf,
+ int mem_rsv_size, int struct_size)
+{
+ int mem_rsv_off, struct_off, strings_off;
+
+ mem_rsv_off = ALIGN(sizeof(struct fdt_header), 8);
+ struct_off = mem_rsv_off + mem_rsv_size;
+ strings_off = struct_off + struct_size;
+
+ memmove(buf + mem_rsv_off, fdt + fdt_off_mem_rsvmap(fdt), mem_rsv_size);
+ fdt_set_off_mem_rsvmap(buf, mem_rsv_off);
+
+ memmove(buf + struct_off, fdt + fdt_off_dt_struct(fdt), struct_size);
+ fdt_set_off_dt_struct(buf, struct_off);
+ fdt_set_size_dt_struct(buf, struct_size);
+
+ memmove(buf + strings_off, fdt + fdt_off_dt_strings(fdt),
+ fdt_size_dt_strings(fdt));
+ fdt_set_off_dt_strings(buf, strings_off);
+ fdt_set_size_dt_strings(buf, fdt_size_dt_strings(fdt));
+}
+
+int fdt_open_into(const void *fdt, void *buf, int bufsize)
+{
+ int err;
+ int mem_rsv_size, struct_size;
+ int newsize;
+ void *tmp;
+
+ CHECK_HEADER(fdt);
+
+ mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
+ * sizeof(struct fdt_reserve_entry);
+
+ if (fdt_version(fdt) >= 17) {
+ struct_size = fdt_size_dt_struct(fdt);
+ } else {
+ struct_size = 0;
+ while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END)
+ ;
+ }
+
+ if (!_blocks_misordered(fdt, mem_rsv_size, struct_size)) {
+ /* no further work necessary */
+ err = fdt_move(fdt, buf, bufsize);
+ if (err)
+ return err;
+ fdt_set_version(buf, 17);
+ fdt_set_size_dt_struct(buf, struct_size);
+ fdt_set_totalsize(buf, bufsize);
+ return 0;
+ }
+
+ /* Need to reorder */
+ newsize = ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size
+ + struct_size + fdt_size_dt_strings(fdt);
+
+ if (bufsize < newsize)
+ return -FDT_ERR_NOSPACE;
+
+ if (((buf + newsize) <= fdt)
+ || (buf >= (fdt + fdt_totalsize(fdt)))) {
+ tmp = buf;
+ } else {
+ tmp = (void *)fdt + fdt_totalsize(fdt);
+ if ((tmp + newsize) > (buf + bufsize))
+ return -FDT_ERR_NOSPACE;
+ }
+
+ _packblocks(fdt, tmp, mem_rsv_size, struct_size);
+ memmove(buf, tmp, newsize);
+
+ fdt_set_magic(buf, FDT_MAGIC);
+ fdt_set_totalsize(buf, bufsize);
+ fdt_set_version(buf, 17);
+ fdt_set_last_comp_version(buf, 16);
+ fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt));
+
+ return 0;
+}
+
+int fdt_pack(void *fdt)
+{
+ int mem_rsv_size;
+ int err;
+
+ err = rw_check_header(fdt);
+ if (err)
+ return err;
+
+ mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
+ * sizeof(struct fdt_reserve_entry);
+ _packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt));
+ fdt_set_totalsize(fdt, _blob_data_size(fdt));
+
+ return 0;
+}
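The read-write path starts with fdt_open_into(), which copies (and if necessary reorders) the blob into a caller-provided buffer whose slack space is then consumed by fdt_add_subnode(), fdt_setprop() and friends; fdt_pack() trims the slack again. A hedged sketch; the function name add_dummy_prop, the buffer headroom of 1024 bytes, and the "example"/"enabled" names are illustrative assumptions.

/* Hedged sketch: grow a blob into a larger buffer, edit it, then pack it. */
#include <stdlib.h>
#include <stdint.h>
#include <libfdt.h>

static void *add_dummy_prop(const void *fdt)
{
	int bufsize = fdt_totalsize(fdt) + 1024;	/* room to grow (assumption) */
	void *buf = malloc(bufsize);
	uint32_t val = cpu_to_fdt32(1);
	int node;

	if (!buf || fdt_open_into(fdt, buf, bufsize) != 0)
		goto fail;

	node = fdt_add_subnode(buf, 0, "example");	/* new node under the root */
	if (node < 0 || fdt_setprop(buf, node, "enabled", &val, sizeof(val)) != 0)
		goto fail;

	fdt_pack(buf);					/* drop unused slack again */
	return buf;
fail:
	free(buf);
	return NULL;
}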
diff --git a/kvm/libfdt/fdt_strerror.c b/kvm/libfdt/fdt_strerror.c
new file mode 100644
index 000000000..f9d32ef53
--- /dev/null
+++ b/kvm/libfdt/fdt_strerror.c
@@ -0,0 +1,96 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+struct errtabent {
+ const char *str;
+};
+
+#define ERRTABENT(val) \
+ [(val)] = { .str = #val, }
+
+static struct errtabent errtable[] = {
+ ERRTABENT(FDT_ERR_NOTFOUND),
+ ERRTABENT(FDT_ERR_EXISTS),
+ ERRTABENT(FDT_ERR_NOSPACE),
+
+ ERRTABENT(FDT_ERR_BADOFFSET),
+ ERRTABENT(FDT_ERR_BADPATH),
+ ERRTABENT(FDT_ERR_BADSTATE),
+
+ ERRTABENT(FDT_ERR_TRUNCATED),
+ ERRTABENT(FDT_ERR_BADMAGIC),
+ ERRTABENT(FDT_ERR_BADVERSION),
+ ERRTABENT(FDT_ERR_BADSTRUCTURE),
+ ERRTABENT(FDT_ERR_BADLAYOUT),
+};
+#define ERRTABSIZE (sizeof(errtable) / sizeof(errtable[0]))
+
+const char *fdt_strerror(int errval)
+{
+ if (errval > 0)
+ return "<valid offset/length>";
+ else if (errval == 0)
+ return "<no error>";
+ else if (errval > -ERRTABSIZE) {
+ const char *s = errtable[-errval].str;
+
+ if (s)
+ return s;
+ }
+
+ return "<unknown error>";
+}
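
fdt_strerror() exists to turn the negative FDT_ERR_* codes used throughout the library into printable strings. A small helper of the kind callers typically write around it (illustrative only, not part of the patch):

#include <stdio.h>
#include <libfdt.h>

/* Print a diagnostic for any libfdt return code and pass it through. */
static int fdt_report(const char *what, int err)
{
    if (err < 0)
        fprintf(stderr, "%s: %s\n", what, fdt_strerror(err));
    return err;
}

/* e.g.: node = fdt_report("fdt_path_offset", fdt_path_offset(fdt, "/cpus")); */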
diff --git a/kvm/libfdt/fdt_sw.c b/kvm/libfdt/fdt_sw.c
new file mode 100644
index 000000000..dda2de34b
--- /dev/null
+++ b/kvm/libfdt/fdt_sw.c
@@ -0,0 +1,258 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int check_header_sw(void *fdt)
+{
+ if (fdt_magic(fdt) != SW_MAGIC)
+ return -FDT_ERR_BADMAGIC;
+ return 0;
+}
+
+static void *grab_space(void *fdt, int len)
+{
+ int offset = fdt_size_dt_struct(fdt);
+ int spaceleft;
+
+ spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt)
+ - fdt_size_dt_strings(fdt);
+
+ if ((offset + len < offset) || (offset + len > spaceleft))
+ return NULL;
+
+ fdt_set_size_dt_struct(fdt, offset + len);
+ return fdt_offset_ptr_w(fdt, offset, len);
+}
+
+int fdt_create(void *buf, int bufsize)
+{
+ void *fdt = buf;
+
+ if (bufsize < sizeof(struct fdt_header))
+ return -FDT_ERR_NOSPACE;
+
+ memset(buf, 0, bufsize);
+
+ fdt_set_magic(fdt, SW_MAGIC);
+ fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
+ fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
+ fdt_set_totalsize(fdt, bufsize);
+
+ fdt_set_off_mem_rsvmap(fdt, ALIGN(sizeof(struct fdt_header),
+ sizeof(struct fdt_reserve_entry)));
+ fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
+ fdt_set_off_dt_strings(fdt, bufsize);
+
+ return 0;
+}
+
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
+{
+ struct fdt_reserve_entry *re;
+ int err = check_header_sw(fdt);
+ int offset;
+
+ if (err)
+ return err;
+ if (fdt_size_dt_struct(fdt))
+ return -FDT_ERR_BADSTATE;
+
+ offset = fdt_off_dt_struct(fdt);
+ if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
+ return -FDT_ERR_NOSPACE;
+
+ re = (struct fdt_reserve_entry *)(fdt + offset);
+ re->address = cpu_to_fdt64(addr);
+ re->size = cpu_to_fdt64(size);
+
+ fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
+
+ return 0;
+}
+
+int fdt_finish_reservemap(void *fdt)
+{
+ return fdt_add_reservemap_entry(fdt, 0, 0);
+}
+
+int fdt_begin_node(void *fdt, const char *name)
+{
+ struct fdt_node_header *nh;
+ int err = check_header_sw(fdt);
+ int namelen = strlen(name) + 1;
+
+ if (err)
+ return err;
+
+ nh = grab_space(fdt, sizeof(*nh) + ALIGN(namelen, FDT_TAGSIZE));
+ if (! nh)
+ return -FDT_ERR_NOSPACE;
+
+ nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
+ memcpy(nh->name, name, namelen);
+ return 0;
+}
+
+int fdt_end_node(void *fdt)
+{
+ uint32_t *en;
+ int err = check_header_sw(fdt);
+
+ if (err)
+ return err;
+
+ en = grab_space(fdt, FDT_TAGSIZE);
+ if (! en)
+ return -FDT_ERR_NOSPACE;
+
+ *en = cpu_to_fdt32(FDT_END_NODE);
+ return 0;
+}
+
+static int find_add_string(void *fdt, const char *s)
+{
+ char *strtab = (char *)fdt + fdt_totalsize(fdt);
+ const char *p;
+ int strtabsize = fdt_size_dt_strings(fdt);
+ int len = strlen(s) + 1;
+ int struct_top, offset;
+
+ p = _fdt_find_string(strtab - strtabsize, strtabsize, s);
+ if (p)
+ return p - strtab;
+
+ /* Add it */
+ offset = -strtabsize - len;
+ struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
+ if (fdt_totalsize(fdt) + offset < struct_top)
+ return 0; /* no more room :( */
+
+ memcpy(strtab + offset, s, len);
+ fdt_set_size_dt_strings(fdt, strtabsize + len);
+ return offset;
+}
+
+int fdt_property(void *fdt, const char *name, const void *val, int len)
+{
+ struct fdt_property *prop;
+ int err = check_header_sw(fdt);
+ int nameoff;
+
+ if (err)
+ return err;
+
+ nameoff = find_add_string(fdt, name);
+ if (nameoff == 0)
+ return -FDT_ERR_NOSPACE;
+
+ prop = grab_space(fdt, sizeof(*prop) + ALIGN(len, FDT_TAGSIZE));
+ if (! prop)
+ return -FDT_ERR_NOSPACE;
+
+ prop->tag = cpu_to_fdt32(FDT_PROP);
+ prop->nameoff = cpu_to_fdt32(nameoff);
+ prop->len = cpu_to_fdt32(len);
+ memcpy(prop->data, val, len);
+ return 0;
+}
+
+int fdt_finish(void *fdt)
+{
+ int err = check_header_sw(fdt);
+ char *p = (char *)fdt;
+ uint32_t *end;
+ int oldstroffset, newstroffset;
+ uint32_t tag;
+ int offset, nextoffset;
+
+ if (err)
+ return err;
+
+ /* Add terminator */
+ end = grab_space(fdt, sizeof(*end));
+ if (! end)
+ return -FDT_ERR_NOSPACE;
+ *end = cpu_to_fdt32(FDT_END);
+
+ /* Relocate the string table */
+ oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt);
+ newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
+ memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
+ fdt_set_off_dt_strings(fdt, newstroffset);
+
+ /* Walk the structure, correcting string offsets */
+ offset = 0;
+ while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
+ if (tag == FDT_PROP) {
+ struct fdt_property *prop =
+ fdt_offset_ptr_w(fdt, offset, sizeof(*prop));
+ int nameoff;
+
+ if (! prop)
+ return -FDT_ERR_BADSTRUCTURE;
+
+ nameoff = fdt32_to_cpu(prop->nameoff);
+ nameoff += fdt_size_dt_strings(fdt);
+ prop->nameoff = cpu_to_fdt32(nameoff);
+ }
+ offset = nextoffset;
+ }
+
+ /* Finally, adjust the header */
+ fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
+ fdt_set_magic(fdt, FDT_MAGIC);
+ return 0;
+}
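
The sequential-write functions in this file are meant to be called strictly in order: fdt_create(), then the reserve map entries, then nodes and properties, and finally fdt_finish(). A sketch building a trivial tree in a caller-supplied buffer (not part of the patch; the property names and values are illustrative):

#include <libfdt.h>

static int build_minimal_tree(void *buf, int bufsize)
{
    int err;

    if ((err = fdt_create(buf, bufsize)))
        return err;
    if ((err = fdt_finish_reservemap(buf)))      /* no reserve entries */
        return err;
    if ((err = fdt_begin_node(buf, "")))         /* root node */
        return err;
    if ((err = fdt_property_string(buf, "model", "example,board")))
        return err;
    if ((err = fdt_property_cell(buf, "#address-cells", 1)))
        return err;
    if ((err = fdt_end_node(buf)))
        return err;
    return fdt_finish(buf);
}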
diff --git a/kvm/libfdt/fdt_wip.c b/kvm/libfdt/fdt_wip.c
new file mode 100644
index 000000000..88e24b831
--- /dev/null
+++ b/kvm/libfdt/fdt_wip.c
@@ -0,0 +1,144 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len)
+{
+ void *propval;
+ int proplen;
+
+ propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen);
+ if (! propval)
+ return proplen;
+
+ if (proplen != len)
+ return -FDT_ERR_NOSPACE;
+
+ memcpy(propval, val, len);
+ return 0;
+}
+
+static void nop_region(void *start, int len)
+{
+ uint32_t *p;
+
+ for (p = start; (void *)p < (start + len); p++)
+ *p = cpu_to_fdt32(FDT_NOP);
+}
+
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
+{
+ struct fdt_property *prop;
+ int len;
+
+ prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
+ if (! prop)
+ return len;
+
+ nop_region(prop, len + sizeof(*prop));
+
+ return 0;
+}
+
+int _fdt_node_end_offset(void *fdt, int nodeoffset)
+{
+ int level = 0;
+ uint32_t tag;
+ int offset, nextoffset;
+
+ tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
+ if (tag != FDT_BEGIN_NODE)
+ return -FDT_ERR_BADOFFSET;
+ do {
+ offset = nextoffset;
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+ switch (tag) {
+ case FDT_END:
+ return offset;
+
+ case FDT_BEGIN_NODE:
+ level++;
+ break;
+
+ case FDT_END_NODE:
+ level--;
+ break;
+
+ case FDT_PROP:
+ case FDT_NOP:
+ break;
+
+ default:
+ return -FDT_ERR_BADSTRUCTURE;
+ }
+ } while (level >= 0);
+
+ return nextoffset;
+}
+
+int fdt_nop_node(void *fdt, int nodeoffset)
+{
+ int endoffset;
+
+ endoffset = _fdt_node_end_offset(fdt, nodeoffset);
+ if (endoffset < 0)
+ return endoffset;
+
+ nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0), endoffset - nodeoffset);
+ return 0;
+}
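
The write-in-place calls never move data, so they are safe on a blob that other code still holds offsets into. A sketch that overwrites one property value and NOPs out another (not part of the patch; the node path and property names are illustrative):

#include <libfdt.h>

static int tweak_serial(void *fdt)
{
    int err;
    int node = fdt_path_offset(fdt, "/soc/serial@1000");

    if (node < 0)
        return node;

    /* Only succeeds if "clock-frequency" already exists with length 4. */
    err = fdt_setprop_inplace_cell(fdt, node, "clock-frequency", 1843200);
    if (err)
        return err;

    /* Replace the property with FDT_NOP tags; the blob keeps its size. */
    return fdt_nop_property(fdt, node, "status");
}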
diff --git a/kvm/libfdt/libfdt.h b/kvm/libfdt/libfdt.h
new file mode 100644
index 000000000..8645de082
--- /dev/null
+++ b/kvm/libfdt/libfdt.h
@@ -0,0 +1,1076 @@
+#ifndef _LIBFDT_H
+#define _LIBFDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <libfdt_env.h>
+#include <fdt.h>
+
+#define FDT_FIRST_SUPPORTED_VERSION 0x10
+#define FDT_LAST_SUPPORTED_VERSION 0x11
+
+/* Error codes: informative error codes */
+#define FDT_ERR_NOTFOUND 1
+ /* FDT_ERR_NOTFOUND: The requested node or property does not exist */
+#define FDT_ERR_EXISTS 2
+ /* FDT_ERR_EXISTS: Attempted to create a node or property which
+ * already exists */
+#define FDT_ERR_NOSPACE 3
+ /* FDT_ERR_NOSPACE: Operation needed to expand the device
+ * tree, but its buffer did not have sufficient space to
+ * contain the expanded tree. Use fdt_open_into() to move the
+ * device tree to a buffer with more space. */
+
+/* Error codes: codes for bad parameters */
+#define FDT_ERR_BADOFFSET 4
+ /* FDT_ERR_BADOFFSET: Function was passed a structure block
+ * offset which is out-of-bounds, or which points to an
+ * unsuitable part of the structure for the operation. */
+#define FDT_ERR_BADPATH 5
+ /* FDT_ERR_BADPATH: Function was passed a badly formatted path
+ * (e.g. missing a leading / for a function which requires an
+ * absolute path) */
+#define FDT_ERR_BADPHANDLE 6
+ /* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle
+ * value. phandle values of 0 and -1 are not permitted. */
+#define FDT_ERR_BADSTATE 7
+ /* FDT_ERR_BADSTATE: Function was passed an incomplete device
+ * tree created by the sequential-write functions, which is
+ * not sufficiently complete for the requested operation. */
+
+/* Error codes: codes for bad device tree blobs */
+#define FDT_ERR_TRUNCATED 8
+ /* FDT_ERR_TRUNCATED: Structure block of the given device tree
+ * ends without an FDT_END tag. */
+#define FDT_ERR_BADMAGIC 9
+ /* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a
+ * device tree at all - it is missing the flattened device
+ * tree magic number. */
+#define FDT_ERR_BADVERSION 10
+ /* FDT_ERR_BADVERSION: Given device tree has a version which
+ * can't be handled by the requested operation. For
+ * read-write functions, this may mean that fdt_open_into() is
+ * required to convert the tree to the expected version. */
+#define FDT_ERR_BADSTRUCTURE 11
+ /* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt
+ * structure block or other serious error (e.g. misnested
+ * nodes, or subnodes preceding properties). */
+#define FDT_ERR_BADLAYOUT 12
+ /* FDT_ERR_BADLAYOUT: For read-write functions, the given
+ * device tree has its sub-blocks in an order that the
+ * function can't handle (memory reserve map, then structure,
+ * then strings). Use fdt_open_into() to reorganize the tree
+ * into a form suitable for the read-write operations. */
+
+/* "Can't happen" error indicating a bug in libfdt */
+#define FDT_ERR_INTERNAL 13
+ /* FDT_ERR_INTERNAL: libfdt has failed an internal assertion.
+ * Should never be returned, if it is, it indicates a bug in
+ * libfdt itself. */
+
+#define FDT_ERR_MAX 13
+
+/**********************************************************************/
+/* Low-level functions (you probably don't need these) */
+/**********************************************************************/
+
+const void *fdt_offset_ptr(const void *fdt, int offset, int checklen);
+static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
+{
+ return (void *)fdt_offset_ptr(fdt, offset, checklen);
+}
+
+uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
+
+/**********************************************************************/
+/* Traversal functions */
+/**********************************************************************/
+
+int fdt_next_node(const void *fdt, int offset, int *depth);
+
+/**********************************************************************/
+/* General functions */
+/**********************************************************************/
+
+#define fdt_get_header(fdt, field) \
+ (fdt32_to_cpu(((const struct fdt_header *)(fdt))->field))
+#define fdt_magic(fdt) (fdt_get_header(fdt, magic))
+#define fdt_totalsize(fdt) (fdt_get_header(fdt, totalsize))
+#define fdt_off_dt_struct(fdt) (fdt_get_header(fdt, off_dt_struct))
+#define fdt_off_dt_strings(fdt) (fdt_get_header(fdt, off_dt_strings))
+#define fdt_off_mem_rsvmap(fdt) (fdt_get_header(fdt, off_mem_rsvmap))
+#define fdt_version(fdt) (fdt_get_header(fdt, version))
+#define fdt_last_comp_version(fdt) (fdt_get_header(fdt, last_comp_version))
+#define fdt_boot_cpuid_phys(fdt) (fdt_get_header(fdt, boot_cpuid_phys))
+#define fdt_size_dt_strings(fdt) (fdt_get_header(fdt, size_dt_strings))
+#define fdt_size_dt_struct(fdt) (fdt_get_header(fdt, size_dt_struct))
+
+#define __fdt_set_hdr(name) \
+ static inline void fdt_set_##name(void *fdt, uint32_t val) \
+ { \
+ struct fdt_header *fdth = fdt; \
+ fdth->name = cpu_to_fdt32(val); \
+ }
+__fdt_set_hdr(magic);
+__fdt_set_hdr(totalsize);
+__fdt_set_hdr(off_dt_struct);
+__fdt_set_hdr(off_dt_strings);
+__fdt_set_hdr(off_mem_rsvmap);
+__fdt_set_hdr(version);
+__fdt_set_hdr(last_comp_version);
+__fdt_set_hdr(boot_cpuid_phys);
+__fdt_set_hdr(size_dt_strings);
+__fdt_set_hdr(size_dt_struct);
+#undef __fdt_set_hdr
+
+/**
+ * fdt_check_header - sanity check a device tree or possible device tree
+ * @fdt: pointer to data which might be a flattened device tree
+ *
+ * fdt_check_header() checks that the given buffer contains what
+ * appears to be a flattened device tree with sane information in its
+ * header.
+ *
+ * returns:
+ * 0, if the buffer appears to contain a valid device tree
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings, as above
+ */
+int fdt_check_header(const void *fdt);
+
+/**
+ * fdt_move - move a device tree around in memory
+ * @fdt: pointer to the device tree to move
+ * @buf: pointer to memory where the device is to be moved
+ * @bufsize: size of the memory space at buf
+ *
+ * fdt_move() relocates, if possible, the device tree blob located at
+ * fdt to the buffer at buf of size bufsize. The buffer may overlap
+ * with the existing device tree blob at fdt. Therefore,
+ * fdt_move(fdt, fdt, fdt_totalsize(fdt))
+ * should always succeed.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_move(const void *fdt, void *buf, int bufsize);
+
+/**********************************************************************/
+/* Read-only functions */
+/**********************************************************************/
+
+/**
+ * fdt_string - retrieve a string from the strings block of a device tree
+ * @fdt: pointer to the device tree blob
+ * @stroffset: offset of the string within the strings block (native endian)
+ *
+ * fdt_string() retrieves a pointer to a single string from the
+ * strings block of the device tree blob at fdt.
+ *
+ * returns:
+ * a pointer to the string, on success
+ * NULL, if stroffset is out of bounds
+ */
+const char *fdt_string(const void *fdt, int stroffset);
+
+/**
+ * fdt_num_mem_rsv - retrieve the number of memory reserve map entries
+ * @fdt: pointer to the device tree blob
+ *
+ * Returns the number of entries in the device tree blob's memory
+ * reservation map. This does not include the terminating 0,0 entry
+ * or any other (0,0) entries reserved for expansion.
+ *
+ * returns:
+ * the number of entries
+ */
+int fdt_num_mem_rsv(const void *fdt);
+
+/**
+ * fdt_get_mem_rsv - retrieve one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: index of the reserve map entry to retrieve
+ * @address, @size: pointers to 64-bit variables
+ *
+ * On success, *address and *size will contain the address and size of
+ * the n-th reserve map entry from the device tree blob, in
+ * native-endian format.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size);
+
+/**
+ * fdt_subnode_offset_namelen - find a subnode based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_subnode_offset(), but only examine the first
+ * namelen characters of name for matching the subnode name. This is
+ * useful for finding subnodes based on a portion of a larger string,
+ * such as a full path.
+ */
+int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
+ const char *name, int namelen);
+/**
+ * fdt_subnode_offset - find a subnode of a given node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * fdt_subnode_offset() finds a subnode of the node at structure block
+ * offset parentoffset with the given name. name may include a unit
+ * address, in which case fdt_subnode_offset() will find the subnode
+ * with that unit address, or the unit address may be omitted, in
+ * which case fdt_subnode_offset() will find an arbitrary subnode
+ * whose name excluding unit address matches the given name.
+ *
+ * returns:
+ * structure block offset of the requested subnode (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
+
+/**
+ * fdt_path_offset - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ *
+ * fdt_path_offset() finds a node of a given path in the device tree.
+ * Each path component may omit the unit address portion, but the
+ * results of this are undefined if any such path component is
+ * ambiguous (that is if there are multiple nodes at the relevant
+ * level matching the given component, differentiated only by unit
+ * address).
+ *
+ * returns:
+ * structure block offset of the node with the requested path (>=0), on success
+ * -FDT_ERR_BADPATH, given path does not begin with '/' or is invalid
+ * -FDT_ERR_NOTFOUND, if the requested node does not exist
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_path_offset(const void *fdt, const char *path);
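
fdt_subnode_offset() and fdt_path_offset() are the usual entry points for locating a node before reading or writing it. A sketch (not part of the patch; the paths are illustrative):

#include <libfdt.h>

static int find_eth0(const void *fdt)
{
    int soc, eth;

    soc = fdt_path_offset(fdt, "/soc");
    if (soc < 0)
        return soc;                       /* e.g. -FDT_ERR_NOTFOUND */

    eth = fdt_subnode_offset(fdt, soc, "ethernet@f0000000");
    return eth;                           /* offset (>=0) or error (<0) */
}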
+
+/**
+ * fdt_get_name - retrieve the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the starting node
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_name() retrieves the name (including unit address) of the
+ * device tree node at structure block offset nodeoffset. If lenp is
+ * non-NULL, the length of this name is also returned, in the integer
+ * pointed to by lenp.
+ *
+ * returns:
+ * pointer to the node's name, on success
+ * If lenp is non-NULL, *lenp contains the length of that name (>=0)
+ * NULL, on error
+ * if lenp is non-NULL *lenp contains an error code (<0):
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
+
+/**
+ * fdt_get_property - find a given property in a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property() retrieves a pointer to the fdt_property
+ * structure within the device tree blob corresponding to the property
+ * named 'name' of the node at offset nodeoffset. If lenp is
+ * non-NULL, the length of the property value is also returned, in the
+ * integer pointed to by lenp.
+ *
+ * returns:
+ * pointer to the structure representing the property
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_NOTFOUND, node does not have named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset,
+ const char *name, int *lenp);
+static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
+ const char *name,
+ int *lenp)
+{
+ return (struct fdt_property *)fdt_get_property(fdt, nodeoffset,
+ name, lenp);
+}
+
+/**
+ * fdt_getprop - retrieve the value of a given property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop() retrieves a pointer to the value of the property
+ * named 'name' of the node at offset nodeoffset (this will be a
+ * pointer to within the device blob itself, not a copy of the value).
+ * If lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by lenp.
+ *
+ * returns:
+ * pointer to the property's value
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_NOTFOUND, node does not have named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+ const char *name, int *lenp);
+static inline void *fdt_getprop_w(void *fdt, int nodeoffset,
+ const char *name, int *lenp)
+{
+ return (void *)fdt_getprop(fdt, nodeoffset, name, lenp);
+}
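
fdt_getprop() returns a pointer into the blob itself, so cell values must be converted with fdt32_to_cpu()/fdt64_to_cpu() before use on little-endian hosts, and the pointer becomes stale as soon as a read-write call moves data. A sketch reading a single cell (not part of the patch; the node path and property name are illustrative):

#include <stdint.h>
#include <libfdt.h>

static int read_cpu_clock(const void *fdt, uint32_t *hz)
{
    int len, node = fdt_path_offset(fdt, "/cpus/cpu@0");
    const uint32_t *val;

    if (node < 0)
        return node;

    val = fdt_getprop(fdt, node, "clock-frequency", &len);
    if (!val)
        return len;                       /* len holds the error code */
    if (len != sizeof(*val))
        return -FDT_ERR_BADSTRUCTURE;     /* caller-chosen error for a bad shape */

    *hz = fdt32_to_cpu(*val);
    return 0;
}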
+
+/**
+ * fdt_get_phandle - retrieve the phandle of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the node
+ *
+ * fdt_get_phandle() retrieves the phandle of the device tree node at
+ * structure block offset nodeoffset.
+ *
+ * returns:
+ * the phandle of the node at nodeoffset, on success (!= 0, != -1)
+ * 0, if the node has no phandle, or another error occurs
+ */
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_get_path - determine the full path of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose path to find
+ * @buf: character buffer to contain the returned path (will be overwritten)
+ * @buflen: size of the character buffer at buf
+ *
+ * fdt_get_path() computes the full path of the node at offset
+ * nodeoffset, and records that path in the buffer at buf.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * 0, on success
+ * buf contains the absolute path of the node at
+ * nodeoffset, as a NUL-terminated string.
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_NOSPACE, the path of the given node is longer than (buflen-1)
+ * characters and will not fit in the given buffer.
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen);
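
fdt_get_name() and fdt_get_path() complement each other when printing diagnostics about a node. A sketch (not part of the patch; the buffer size is illustrative):

#include <stdio.h>
#include <libfdt.h>

static void print_node(const void *fdt, int nodeoffset)
{
    char path[256];
    int len;
    const char *name = fdt_get_name(fdt, nodeoffset, &len);

    if (!name) {
        fprintf(stderr, "bad offset %d: %s\n", nodeoffset, fdt_strerror(len));
        return;
    }
    if (fdt_get_path(fdt, nodeoffset, path, sizeof(path)) == 0)
        printf("%s (name \"%s\", %d chars)\n", path, name, len);
}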
+
+/**
+ * fdt_supernode_atdepth_offset - find a specific ancestor of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ * @supernodedepth: depth of the ancestor to find
+ * @nodedepth: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_supernode_atdepth_offset() finds an ancestor of the given node
+ * at a specific depth from the root (where the root itself has depth
+ * 0, its immediate subnodes depth 1 and so forth). So
+ * fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL);
+ * will always return 0, the offset of the root node. If the node at
+ * nodeoffset has depth D, then:
+ * fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL);
+ * will return nodeoffset itself.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * structure block offset of the ancestor of the node at nodeoffset
+ * at depth supernodedepth (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of nodeoffset
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+ int supernodedepth, int *nodedepth);
+
+/**
+ * fdt_node_depth - find the depth of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_node_depth() finds the depth of a given node. The root node
+ * has depth 0, its immediate subnodes depth 1 and so forth.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * depth of the node at nodeoffset (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_depth(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_parent_offset - find the parent of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_parent_offset() locates the parent node of a given node (that
+ * is, it finds the offset of the node which contains the node at
+ * nodeoffset as a subnode).
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset, *twice*.
+ *
+ * returns:
+ * structure block offset of the parent of the node at nodeoffset
+ * (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_parent_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_node_offset_by_prop_value - find nodes with a given property value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @propname: property name to check
+ * @propval: property value to search for
+ * @proplen: length of the value in propval
+ *
+ * fdt_node_offset_by_prop_value() returns the offset of the first
+ * node after startoffset, which has a property named propname whose
+ * value is of length proplen and has value equal to propval; or if
+ * startoffset is -1, the very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ * offset = fdt_node_offset_by_prop_value(fdt, -1, propname,
+ * propval, proplen);
+ * while (offset != -FDT_ERR_NOTFOUND) {
+ * // other code here
+ * offset = fdt_node_offset_by_prop_value(fdt, offset, propname,
+ * propval, proplen);
+ * }
+ *
+ * Note the -1 in the first call to the function; if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0, >startoffset),
+ * on success
+ * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ * tree after startoffset
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+ const char *propname,
+ const void *propval, int proplen);
+
+/**
+ * fdt_node_offset_by_phandle - find the node with a given phandle
+ * @fdt: pointer to the device tree blob
+ * @phandle: phandle value
+ *
+ * fdt_node_offset_by_phandle() returns the offset of the node
+ * which has the given phandle value. If there is more than one node
+ * in the tree with the given phandle (an invalid tree), results are
+ * undefined.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0), on success
+ * -FDT_ERR_NOTFOUND, no node with that phandle exists
+ * -FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle);
+
+/**
+ * fdt_node_check_compatible - check a node's compatible property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @compatible: string to match against
+ *
+ * fdt_node_check_compatible() returns 0 if the given node contains a
+ * 'compatible' property with the given string as one of its elements;
+ * it returns non-zero otherwise, or on error.
+ *
+ * returns:
+ * 0, if the node has a 'compatible' property listing the given string
+ * 1, if the node has a 'compatible' property, but it does not list
+ * the given string
+ * -FDT_ERR_NOTFOUND, if the given node has no 'compatible' property
+ * -FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+ const char *compatible);
+
+/**
+ * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @compatible: 'compatible' string to match against
+ *
+ * fdt_node_offset_by_compatible() returns the offset of the first
+ * node after startoffset, which has a 'compatible' property which
+ * lists the given compatible string; or if startoffset is -1, the
+ * very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ * offset = fdt_node_offset_by_compatible(fdt, -1, compatible);
+ * while (offset != -FDT_ERR_NOTFOUND) {
+ * // other code here
+ * offset = fdt_node_offset_by_compatible(fdt, offset, compatible);
+ * }
+ *
+ * Note the -1 in the first call to the function; if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0, >startoffset),
+ * on success
+ * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ * tree after startoffset
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+ const char *compatible);
+
+/**********************************************************************/
+/* Write-in-place functions */
+/**********************************************************************/
+
+/**
+ * fdt_setprop_inplace - change a property's value, but not its size
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to replace the property value with
+ * @len: length of the property value
+ *
+ * fdt_setprop_inplace() replaces the value of a given property with
+ * the data in val, of length len. This function cannot change the
+ * size of a property, and so will only work if len is equal to the
+ * current length of the property.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if len is not equal to the property's current length
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+
+/**
+ * fdt_setprop_inplace_cell - change the value of a single-cell property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: cell (32-bit integer) value to replace the property with
+ *
+ * fdt_setprop_inplace_cell() replaces the value of a given property
+ * with the 32-bit integer cell value in val, converting val to
+ * big-endian if necessary. This function cannot change the size of a
+ * property, and so will only work if the property already exists and
+ * has length 4.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if the property's length is not equal to 4
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
+ const char *name, uint32_t val)
+{
+ val = cpu_to_fdt32(val);
+ return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_nop_property - replace a property with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to nop
+ * @name: name of the property to nop
+ *
+ * fdt_nop_property() will replace a given property's representation
+ * in the blob with FDT_NOP tags, effectively removing it from the
+ * tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the property, and will not alter or move any other part of the
+ * tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_nop_node - replace a node (subtree) with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_nop_node() will replace a given node's representation in the
+ * blob, including all its subnodes, if any, with FDT_NOP tags,
+ * effectively removing it from the tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the node and its properties and subnodes, and will not alter or
+ * move any other part of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_node(void *fdt, int nodeoffset);
+
+/**********************************************************************/
+/* Sequential write functions */
+/**********************************************************************/
+
+int fdt_create(void *buf, int bufsize);
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
+int fdt_finish_reservemap(void *fdt);
+int fdt_begin_node(void *fdt, const char *name);
+int fdt_property(void *fdt, const char *name, const void *val, int len);
+static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+{
+ val = cpu_to_fdt32(val);
+ return fdt_property(fdt, name, &val, sizeof(val));
+}
+#define fdt_property_string(fdt, name, str) \
+ fdt_property(fdt, name, str, strlen(str)+1)
+int fdt_end_node(void *fdt);
+int fdt_finish(void *fdt);
+
+/**********************************************************************/
+/* Read-write functions */
+/**********************************************************************/
+
+int fdt_open_into(const void *fdt, void *buf, int bufsize);
+int fdt_pack(void *fdt);
+
+/**
+ * fdt_add_mem_rsv - add one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @address, @size: 64-bit values (native endian)
+ *
+ * Adds a reserve map entry to the given blob reserving a region at
+ * address address of length size.
+ *
+ * This function will insert data into the reserve map and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new reservation entry
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size);
+
+/**
+ * fdt_del_mem_rsv - remove a memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: entry to remove
+ *
+ * fdt_del_mem_rsv() removes the n-th memory reserve map entry from
+ * the blob.
+ *
+ * This function will delete data from the reservation table and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, there is no entry of the given index (i.e. there
+ * are less than n+1 reserve map entries)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_mem_rsv(void *fdt, int n);
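
A sketch pairing fdt_add_mem_rsv() with fdt_num_mem_rsv()/fdt_get_mem_rsv() (not part of the patch; the reserved range is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <libfdt.h>

static int dump_and_reserve(void *fdt)
{
    uint64_t addr, size;
    int i, err;

    for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
        if ((err = fdt_get_mem_rsv(fdt, i, &addr, &size)))
            return err;
        printf("rsv[%d]: %#llx + %#llx\n", i,
               (unsigned long long)addr, (unsigned long long)size);
    }
    /* Reserve a 1MiB window for firmware (addresses are made up). */
    return fdt_add_mem_rsv(fdt, 0x80000000ULL, 0x100000ULL);
}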
+
+/**
+ * fdt_set_name - change the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ * @name: name to give the node
+ *
+ * fdt_set_name() replaces the name (including unit address, if any)
+ * of the given node with the given string. NOTE: this function can't
+ * efficiently check if the new name is unique amongst the given
+ * node's siblings; results are undefined if this function is invoked
+ * with a name equal to one of the given node's siblings.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob
+ * to contain the new name
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_set_name(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_setprop - create or change a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to set the property value to
+ * @len: length of the property value
+ *
+ * fdt_setprop() sets the value of the named property in the given
+ * node to the given value and length, creating the property if it
+ * does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+
+/**
+ * fdt_setprop_cell - set a property to a single cell value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_cell() sets the value of the named property in the
+ * given node to the given cell value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
+ uint32_t val)
+{
+ val = cpu_to_fdt32(val);
+ return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_setprop_string - set a property to a string value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value for the property
+ *
+ * fdt_setprop_string() sets the value of the named property in the
+ * given node to the given string value (using the length of the
+ * string to determine the new length of the property), or creates a
+ * new property with that value if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_string(fdt, nodeoffset, name, str) \
+ fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
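
The three fdt_setprop variants above differ only in how the value is supplied; all of them may grow the blob, so they rely on the free space that fdt_open_into() leaves behind. A sketch (not part of the patch; the node path, property names and values are illustrative):

#include <stdint.h>
#include <libfdt.h>

static int describe_flash(void *fdt)
{
    int node = fdt_path_offset(fdt, "/flash@0");
    uint32_t banks[2] = { cpu_to_fdt32(0), cpu_to_fdt32(0x1000000) };
    int err;

    if (node < 0)
        return node;

    if ((err = fdt_setprop_string(fdt, node, "compatible", "cfi-flash")))
        return err;
    if ((err = fdt_setprop_cell(fdt, node, "bank-width", 2)))
        return err;
    /* Raw variant: the caller is responsible for the byte order of the payload. */
    return fdt_setprop(fdt, node, "reg", banks, sizeof(banks));
}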
+
+/**
+ * fdt_delprop - delete a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to delete
+ * @name: name of the property to delete
+ *
+ * fdt_delprop() will delete the given property.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_delprop(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_add_subnode_namelen - creates a new node based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_add_subnode(), but use only the first namelen
+ * characters of name as the name of the new node. This is useful for
+ * creating subnodes based on a portion of a larger string, such as a
+ * full path.
+ */
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+ const char *name, int namelen);
+
+/**
+ * fdt_add_subnode - creates a new node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * fdt_add_subnode() creates a new node as a subnode of the node at
+ * structure block offset parentoffset, with the given name (which
+ * should include the unit address, if any).
+ *
+ * This function will insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * structure block offset of the created subnode (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE tag
+ * -FDT_ERR_EXISTS, if the node at parentoffset already has a subnode of
+ * the given name
+ * -FDT_ERR_NOSPACE, if there is insufficient free space in the
+ * blob to contain the new node
+ * -FDT_ERR_BADLAYOUT
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name);
+
+/**
+ * fdt_del_node - delete a node (subtree)
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_del_node() will remove the given node, including all its
+ * subnodes if any, from the blob.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_node(void *fdt, int nodeoffset);
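
A sketch combining fdt_add_subnode() with fdt_del_node(); as noted above, both calls shift the offsets of other nodes, so offsets must be looked up again after each one (not part of the patch; the paths and property value are illustrative):

#include <libfdt.h>

static int replace_chosen(void *fdt)
{
    int node, err;

    /* Remove any existing /chosen node wholesale... */
    node = fdt_path_offset(fdt, "/chosen");
    if (node >= 0 && (err = fdt_del_node(fdt, node)))
        return err;

    /* ...then recreate it under the root (offset 0) and populate it. */
    node = fdt_add_subnode(fdt, 0, "chosen");
    if (node < 0)
        return node;
    return fdt_setprop_string(fdt, node, "bootargs", "root=/dev/sda1 ro");
}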
+
+/**********************************************************************/
+/* Debugging / informational functions */
+/**********************************************************************/
+
+const char *fdt_strerror(int errval);
+
+#endif /* _LIBFDT_H */
diff --git a/kvm/libfdt/libfdt_env.h b/kvm/libfdt/libfdt_env.h
new file mode 100644
index 000000000..59f2536d2
--- /dev/null
+++ b/kvm/libfdt/libfdt_env.h
@@ -0,0 +1,22 @@
+#ifndef _LIBFDT_ENV_H
+#define _LIBFDT_ENV_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define fdt32_to_cpu(x) (x)
+#define cpu_to_fdt32(x) (x)
+#define fdt64_to_cpu(x) (x)
+#define cpu_to_fdt64(x) (x)
+#else
+#define fdt32_to_cpu(x) (bswap_32((x)))
+#define cpu_to_fdt32(x) (bswap_32((x)))
+#define fdt64_to_cpu(x) (bswap_64((x)))
+#define cpu_to_fdt64(x) (bswap_64((x)))
+#endif
+
+#endif /* _LIBFDT_ENV_H */
diff --git a/kvm/libfdt/libfdt_internal.h b/kvm/libfdt/libfdt_internal.h
new file mode 100644
index 000000000..52e1b8d81
--- /dev/null
+++ b/kvm/libfdt/libfdt_internal.h
@@ -0,0 +1,96 @@
+#ifndef _LIBFDT_INTERNAL_H
+#define _LIBFDT_INTERNAL_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fdt.h>
+
+#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define PALIGN(p, a) ((void *)ALIGN((unsigned long)(p), (a)))
+
+#define memeq(p, q, n) (memcmp((p), (q), (n)) == 0)
+#define streq(p, q) (strcmp((p), (q)) == 0)
+
+#define CHECK_HEADER(fdt) \
+ { \
+ int err; \
+ if ((err = fdt_check_header(fdt)) != 0) \
+ return err; \
+ }
+
+uint32_t _fdt_next_tag(const void *fdt, int startoffset, int *nextoffset);
+const char *_fdt_find_string(const char *strtab, int tabsize, const char *s);
+int _fdt_node_end_offset(void *fdt, int nodeoffset);
+
+static inline const void *_fdt_offset_ptr(const void *fdt, int offset)
+{
+ return fdt + fdt_off_dt_struct(fdt) + offset;
+}
+
+static inline void *_fdt_offset_ptr_w(void *fdt, int offset)
+{
+ return (void *)_fdt_offset_ptr(fdt, offset);
+}
+
+static inline const struct fdt_reserve_entry *_fdt_mem_rsv(const void *fdt, int n)
+{
+ const struct fdt_reserve_entry *rsv_table =
+ fdt + fdt_off_mem_rsvmap(fdt);
+
+ return rsv_table + n;
+}
+static inline struct fdt_reserve_entry *_fdt_mem_rsv_w(void *fdt, int n)
+{
+ return (void *)_fdt_mem_rsv(fdt, n);
+}
+
+#define SW_MAGIC (~FDT_MAGIC)
+
+#endif /* _LIBFDT_INTERNAL_H */
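
ALIGN() above rounds a size up to the next multiple of a power-of-two alignment, and PALIGN() does the same for pointers. A couple of worked values (illustration only, not part of the patch):

    #include <assert.h>
    #include "libfdt_internal.h"

    int main(void)
    {
            assert(ALIGN(5, 4) == 8);         /* 5 rounded up to the next multiple of 4 */
            assert(ALIGN(8, 4) == 8);         /* already-aligned values are unchanged */
            return 0;
    }
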
diff --git a/kvm/libkvm/Makefile b/kvm/libkvm/Makefile
new file mode 100644
index 000000000..4db773de0
--- /dev/null
+++ b/kvm/libkvm/Makefile
@@ -0,0 +1,54 @@
+all: libkvm.a
+
+include ../../config-host.mak
+ifneq ($(VPATH),)
+srcdir=$(VPATH)/kvm/libkvm
+else
+srcdir=.
+endif
+
+include $(srcdir)/config-$(ARCH).mak
+
+
+# libkvm is not -Wredundant-decls friendly yet
+CFLAGS += -Wno-redundant-decls
+
+# cc-option
+# Usage: OP_CFLAGS+=$(call cc-option, -falign-functions=0, -malign-functions=0)
+cc-option = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null \
+ > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
+
+CFLAGS += $(autodepend-flags) -g -fomit-frame-pointer -Wall
+CFLAGS += $(call cc-option, -fno-stack-protector, "")
+CFLAGS += $(call cc-option, -fno-stack-protector-all, "")
+CFLAGS += $(KVM_CFLAGS)
+
+LDFLAGS += $(CFLAGS)
+
+CXXFLAGS = $(autodepend-flags)
+
+VPATH:=$(VPATH)/kvm/libkvm
+
+autodepend-flags = -MMD -MF $(dir $*).$(notdir $*).d
+
+
+libkvm.a: libkvm.o $(libkvm-$(ARCH)-objs)
+ $(AR) rcs $@ $^
+
+install:
+ @echo skipping libkvm install
+
+install-libkvm:
+ install -D libkvm.h $(DESTDIR)/$(PREFIX)/include/libkvm.h
+ install -D libkvm.a $(DESTDIR)/$(PREFIX)/$(LIBDIR)/libkvm.a
+
+install-kernel-headers:
+ install -D $(LIBKVM_KERNELDIR)/include/linux/kvm.h \
+ $(DESTDIR)/$(PREFIX)/include/linux/kvm.h
+ install -D $(LIBKVM_KERNELDIR)/include/linux/kvm_para.h \
+ $(DESTDIR)/$(PREFIX)/include/linux/kvm_para.h
+
+-include .*.d
+
+clean:
+ $(RM) *.o *.a .*.d
diff --git a/kvm/libkvm/config-i386.mak b/kvm/libkvm/config-i386.mak
new file mode 100644
index 000000000..2706b70f7
--- /dev/null
+++ b/kvm/libkvm/config-i386.mak
@@ -0,0 +1,6 @@
+
+LIBDIR := /lib
+CFLAGS += -m32
+CFLAGS += -D__i386__
+
+libkvm-$(ARCH)-objs := libkvm-x86.o
diff --git a/kvm/libkvm/config-ia64.mak b/kvm/libkvm/config-ia64.mak
new file mode 100644
index 000000000..568c39707
--- /dev/null
+++ b/kvm/libkvm/config-ia64.mak
@@ -0,0 +1,5 @@
+
+LIBDIR := /lib
+CFLAGS += -D__ia64__
+
+libkvm-$(ARCH)-objs := libkvm-ia64.o
diff --git a/kvm/libkvm/config-ppc.mak b/kvm/libkvm/config-ppc.mak
new file mode 100644
index 000000000..091da370d
--- /dev/null
+++ b/kvm/libkvm/config-ppc.mak
@@ -0,0 +1,4 @@
+
+LIBDIR := /lib
+
+libkvm-$(ARCH)-objs := libkvm-powerpc.o
diff --git a/kvm/libkvm/config-s390.mak b/kvm/libkvm/config-s390.mak
new file mode 100644
index 000000000..8177e4ad0
--- /dev/null
+++ b/kvm/libkvm/config-s390.mak
@@ -0,0 +1,3 @@
+# s390 31bit mode
+LIBDIR := /lib
+libkvm-$(ARCH)-objs := libkvm-s390.o
diff --git a/kvm/libkvm/config-s390x.mak b/kvm/libkvm/config-s390x.mak
new file mode 100644
index 000000000..f08ed3d88
--- /dev/null
+++ b/kvm/libkvm/config-s390x.mak
@@ -0,0 +1,3 @@
+# s390 64 bit mode (arch=s390x)
+LIBDIR := /lib64
+libkvm-$(ARCH)-objs := libkvm-s390.o
diff --git a/kvm/libkvm/config-x86_64.mak b/kvm/libkvm/config-x86_64.mak
new file mode 100644
index 000000000..e6389775a
--- /dev/null
+++ b/kvm/libkvm/config-x86_64.mak
@@ -0,0 +1,6 @@
+
+LIBDIR := /lib64
+CFLAGS += -m64
+CFLAGS += -D__x86_64__
+
+libkvm-$(ARCH)-objs := libkvm-x86.o
diff --git a/kvm/libkvm/kvm-common.h b/kvm/libkvm/kvm-common.h
new file mode 100644
index 000000000..c95c59169
--- /dev/null
+++ b/kvm/libkvm/kvm-common.h
@@ -0,0 +1,94 @@
+/*
+ * This header is for functions & variables that will ONLY be
+ * used inside libkvm.
+ *
+ * derived from libkvm.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef KVM_COMMON_H
+#define KVM_COMMON_H
+
+/* FIXME: share this number with kvm */
+/* FIXME: or dynamically alloc/realloc regions */
+#ifdef __s390__
+#define KVM_MAX_NUM_MEM_REGIONS 1u
+#define MAX_VCPUS 64
+#define LIBKVM_S390_ORIGIN (0UL)
+#elif defined(__ia64__)
+#define KVM_MAX_NUM_MEM_REGIONS 32u
+#define MAX_VCPUS 256
+#else
+#define KVM_MAX_NUM_MEM_REGIONS 32u
+#define MAX_VCPUS 16
+#endif
+
+
+/* kvm abi version variable */
+extern int kvm_abi;
+
+/**
+ * \brief The KVM context
+ *
+ * The verbose KVM context
+ */
+
+struct kvm_context {
+ /// Filedescriptor to /dev/kvm
+ int fd;
+ int vm_fd;
+ int vcpu_fd[MAX_VCPUS];
+ struct kvm_run *run[MAX_VCPUS];
+ /// Callbacks that KVM uses to emulate various unvirtualizable functionality
+ struct kvm_callbacks *callbacks;
+ void *opaque;
+ /// is dirty pages logging enabled for all regions or not
+ int dirty_pages_log_all;
+ /// do not create in-kernel irqchip if set
+ int no_irqchip_creation;
+ /// in-kernel irqchip status
+ int irqchip_in_kernel;
+ /// ioctl to use to inject interrupts
+ int irqchip_inject_ioctl;
+ /// do not create in-kernel pit if set
+ int no_pit_creation;
+ /// in-kernel pit status
+ int pit_in_kernel;
+ /// in-kernel coalesced mmio
+ int coalesced_mmio;
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing *irq_routes;
+ int nr_allocated_irq_routes;
+#endif
+ void *used_gsi_bitmap;
+ int max_gsi;
+};
+
+int kvm_alloc_kernel_memory(kvm_context_t kvm, unsigned long memory,
+ void **vm_mem);
+int kvm_alloc_userspace_memory(kvm_context_t kvm, unsigned long memory,
+ void **vm_mem);
+
+int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+ void **vm_mem);
+int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu);
+
+
+void kvm_show_code(kvm_context_t kvm, int vcpu);
+
+int handle_halt(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
+void post_kvm_run(kvm_context_t kvm, void *env);
+int pre_kvm_run(kvm_context_t kvm, void *env);
+int handle_io_window(kvm_context_t kvm);
+int handle_debug(kvm_context_t kvm, int vcpu, void *env);
+int try_push_interrupts(kvm_context_t kvm);
+
+#endif
diff --git a/kvm/libkvm/kvm-ia64.h b/kvm/libkvm/kvm-ia64.h
new file mode 100644
index 000000000..ad87ae764
--- /dev/null
+++ b/kvm/libkvm/kvm-ia64.h
@@ -0,0 +1,31 @@
+/*
+ * This header is for functions & variables that will ONLY be
+ * used inside libkvm for ia64.
+ * THESE ARE NOT EXPOSED TO THE USER AND ARE ONLY FOR USE
+ * WITHIN LIBKVM.
+ *
+ * derived from libkvm.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef KVM_IA64_H
+#define KVM_IA64_H
+
+#include "kvm-common.h"
+
+extern int kvm_page_size;
+
+#define PAGE_SIZE kvm_page_size
+#define PAGE_MASK (~(kvm_page_size - 1))
+
+#define ia64_mf() asm volatile ("mf" ::: "memory")
+#define smp_wmb() ia64_mf()
+
+#endif
diff --git a/kvm/libkvm/kvm-powerpc.h b/kvm/libkvm/kvm-powerpc.h
new file mode 100644
index 000000000..b09511c1c
--- /dev/null
+++ b/kvm/libkvm/kvm-powerpc.h
@@ -0,0 +1,36 @@
+/*
+ * This header is for functions & variables that will ONLY be
+ * used inside libkvm for powerpc.
+ * THESE ARE NOT EXPOSED TO THE USER AND ARE ONLY FOR USE
+ * WITHIN LIBKVM.
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * Copyright 2007 IBM Corporation.
+ * Added by: Jerone Young <jyoung5@us.ibm.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef KVM_POWERPC_H
+#define KVM_POWERPC_H
+
+#include "kvm-common.h"
+
+extern int kvm_page_size;
+
+#define PAGE_SIZE kvm_page_size
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+static inline void eieio(void)
+{
+ asm volatile("eieio" : : : "memory");
+}
+
+#define smp_wmb() eieio()
+
+#endif
diff --git a/kvm/libkvm/kvm-s390.h b/kvm/libkvm/kvm-s390.h
new file mode 100644
index 000000000..9edd9a33b
--- /dev/null
+++ b/kvm/libkvm/kvm-s390.h
@@ -0,0 +1,31 @@
+/*
+ * This header is for functions & variables that will ONLY be
+ * used inside libkvm for s390.
+ * THESE ARE NOT EXPOSED TO THE USER AND ARE ONLY FOR USE
+ * WITHIN LIBKVM.
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * Copyright 2008 IBM Corporation.
+ * Authors:
+ * Carsten Otte <cotte@de.ibm.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef KVM_S390_H
+#define KVM_S390_H
+
+#include <asm/ptrace.h>
+#include "kvm-common.h"
+
+#define PAGE_SIZE 4096ul
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define smp_wmb() asm volatile("" ::: "memory")
+
+#endif
diff --git a/kvm/libkvm/kvm-x86.h b/kvm/libkvm/kvm-x86.h
new file mode 100644
index 000000000..e988cb7bb
--- /dev/null
+++ b/kvm/libkvm/kvm-x86.h
@@ -0,0 +1,55 @@
+/*
+ * This header is for functions & variables that will ONLY be
+ * used inside libkvm for x86.
+ * THESE ARE NOT EXPOSED TO THE USER AND ARE ONLY FOR USE
+ * WITHIN LIBKVM.
+ *
+ * derived from libkvm.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef KVM_X86_H
+#define KVM_X86_H
+
+#include "kvm-common.h"
+
+#define PAGE_SIZE 4096ul
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr);
+
+#ifdef KVM_CAP_VAPIC
+
+/*!
+ * \brief Enable kernel tpr access reporting
+ *
+ * When tpr access reporting is enabled, the kernel will call the
+ * ->tpr_access() callback every time the guest vcpu accesses the tpr.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu vcpu to enable tpr access reporting on
+ */
+int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+
+/*!
+ * \brief Disable kernel tpr access reporting
+ *
+ * Undoes the effect of kvm_enable_tpr_access_reporting().
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu vcpu to disable tpr access reporting on
+ */
+int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+
+#endif
+
+#define smp_wmb() asm volatile("" ::: "memory")
+
+#endif
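
A sketch of how the two declarations above pair up (illustration only, not part of the patch). It assumes `kvm` came from kvm_init() and that vcpu 0 exists; when the kernel lacks KVM_CAP_VAPIC the enable call simply fails and nothing is toggled.

    #include "kvm-x86.h"

    static void probe_tpr_reporting(kvm_context_t kvm)
    {
            if (kvm_enable_tpr_access_reporting(kvm, 0) == 0) {
                    /* guest TPR accesses now reach the ->tpr_access() callback */
                    kvm_disable_tpr_access_reporting(kvm, 0);
            }
    }
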
diff --git a/kvm/libkvm/libkvm-ia64.c b/kvm/libkvm/libkvm-ia64.c
new file mode 100644
index 000000000..2f1567595
--- /dev/null
+++ b/kvm/libkvm/libkvm-ia64.c
@@ -0,0 +1,82 @@
+/*
+ * libkvm-ia64.c :Kernel-based Virtual Machine control library for ia64.
+ *
+ * This library provides an API to control the kvm hardware virtualization
+ * module.
+ *
+ * Copyright (C) 2006 Qumranet
+ *
+ * Authors:
+ *
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * Copyright (C) 2007 Intel
+ * Added by : Zhang Xiantao <xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ *
+ */
+
+#include "libkvm.h"
+#include "kvm-ia64.h"
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <unistd.h>
+#include <stropts.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+
+int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+ void **vm_mem)
+{
+ int r;
+
+ r = kvm_init_coalesced_mmio(kvm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+{
+ int r = 0;
+
+ switch (run->exit_reason) {
+ default:
+ r = 1;
+ break;
+ }
+
+ return r;
+}
+
+void kvm_show_code(kvm_context_t kvm, int vcpu)
+{
+ fprintf(stderr, "kvm_show_code not supported yet!\n");
+}
+
+void kvm_show_regs(kvm_context_t kvm, int vcpu)
+{
+ fprintf(stderr,"kvm_show_regs not supportted today!\n");
+}
+
+int kvm_create_memory_alias(kvm_context_t kvm,
+ uint64_t phys_start,
+ uint64_t len,
+ uint64_t target_phys)
+{
+ return 0;
+}
+
+int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
+{
+ return 0;
+}
diff --git a/kvm/libkvm/libkvm-powerpc.c b/kvm/libkvm/libkvm-powerpc.c
new file mode 100644
index 000000000..f2cd8dc32
--- /dev/null
+++ b/kvm/libkvm/libkvm-powerpc.c
@@ -0,0 +1,100 @@
+/*
+ * This file contains the powerpc specific implementation for the
+ * architecture dependent functions defined in kvm-common.h and
+ * libkvm.h
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * Copyright IBM Corp. 2007,2008
+ * Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#include "libkvm.h"
+#include "kvm-powerpc.h"
+#include <errno.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+int handle_dcr(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+{
+ int ret = 0;
+
+ if (run->dcr.is_write)
+ ret = kvm->callbacks->powerpc_dcr_write(vcpu,
+ run->dcr.dcrn,
+ run->dcr.data);
+ else
+ ret = kvm->callbacks->powerpc_dcr_read(vcpu,
+ run->dcr.dcrn,
+ &(run->dcr.data));
+
+ return ret;
+}
+
+void kvm_show_code(kvm_context_t kvm, int vcpu)
+{
+ fprintf(stderr, "%s: Operation not supported\n", __FUNCTION__);
+}
+
+void kvm_show_regs(kvm_context_t kvm, int vcpu)
+{
+ struct kvm_regs regs;
+ int i;
+
+ if (kvm_get_regs(kvm, vcpu, &regs))
+ return;
+
+ fprintf(stderr,"guest vcpu #%d\n", vcpu);
+ fprintf(stderr,"pc: %016"PRIx64" msr: %016"PRIx64"\n",
+ regs.pc, regs.msr);
+ fprintf(stderr,"lr: %016"PRIx64" ctr: %016"PRIx64"\n",
+ regs.lr, regs.ctr);
+ fprintf(stderr,"srr0: %016"PRIx64" srr1: %016"PRIx64"\n",
+ regs.srr0, regs.srr1);
+ for (i=0; i<32; i+=4)
+ {
+ fprintf(stderr, "gpr%02d: %016"PRIx64" %016"PRIx64" %016"PRIx64
+ " %016"PRIx64"\n", i,
+ regs.gpr[i],
+ regs.gpr[i+1],
+ regs.gpr[i+2],
+ regs.gpr[i+3]);
+ }
+
+ fflush(stderr);
+}
+
+int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+ void **vm_mem)
+{
+ int r;
+
+ r = kvm_init_coalesced_mmio(kvm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+{
+ int ret = 0;
+
+ switch (run->exit_reason){
+ case KVM_EXIT_DCR:
+ ret = handle_dcr(run, kvm, vcpu);
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
diff --git a/kvm/libkvm/libkvm-s390.c b/kvm/libkvm/libkvm-s390.c
new file mode 100644
index 000000000..041c0ce31
--- /dev/null
+++ b/kvm/libkvm/libkvm-s390.c
@@ -0,0 +1,110 @@
+/*
+ * This file contains the s390 specific implementation for the
+ * architecture dependent functions defined in kvm-common.h and
+ * libkvm.h
+ *
+ * Copyright (C) 2006 Qumranet
+ * Copyright IBM Corp. 2008
+ *
+ * Authors:
+ * Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#include <sys/ioctl.h>
+#include <asm/ptrace.h>
+
+#include "libkvm.h"
+#include "kvm-common.h"
+#include <errno.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+void kvm_show_code(kvm_context_t kvm, int vcpu)
+{
+ fprintf(stderr, "%s: Operation not supported\n", __FUNCTION__);
+}
+
+void kvm_show_regs(kvm_context_t kvm, int vcpu)
+{
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ int i;
+
+ if (kvm_get_regs(kvm, vcpu, &regs))
+ return;
+
+ if (kvm_get_sregs(kvm, vcpu, &sregs))
+ return;
+
+ fprintf(stderr, "guest vcpu #%d\n", vcpu);
+ fprintf(stderr, "PSW:\t%16.16lx %16.16lx\n",
+ kvm->run[vcpu]->s390_sieic.mask,
+ kvm->run[vcpu]->s390_sieic.addr);
+ fprintf(stderr,"GPRS:");
+ for (i=0; i<15; i+=4)
+ fprintf(stderr, "\t%16.16lx %16.16lx %16.16lx %16.16lx\n",
+ regs.gprs[i],
+ regs.gprs[i+1],
+ regs.gprs[i+2],
+ regs.gprs[i+3]);
+ fprintf(stderr,"ACRS:");
+ for (i=0; i<15; i+=4)
+ fprintf(stderr, "\t%8.8x %8.8x %8.8x %8.8x\n",
+ sregs.acrs[i],
+ sregs.acrs[i+1],
+ sregs.acrs[i+2],
+ sregs.acrs[i+3]);
+
+ fprintf(stderr,"CRS:");
+ for (i=0; i<15; i+=4)
+ fprintf(stderr, "\t%16.16lx %16.16lx %16.16lx %16.16lx\n",
+ sregs.crs[i],
+ sregs.crs[i+1],
+ sregs.crs[i+2],
+ sregs.crs[i+3]);
+}
+
+int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+ void **vm_mem)
+{
+ return 0;
+}
+
+int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+{
+ int ret = 0;
+
+ switch (run->exit_reason){
+ default:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
+int kvm_s390_initial_reset(kvm_context_t kvm, int slot)
+{
+ return ioctl(kvm->vcpu_fd[slot], KVM_S390_INITIAL_RESET, NULL);
+}
+
+int kvm_s390_interrupt(kvm_context_t kvm, int slot,
+ struct kvm_s390_interrupt *kvmint)
+{
+ if (slot>=0)
+ return ioctl(kvm->vcpu_fd[slot], KVM_S390_INTERRUPT, kvmint);
+ else
+ return ioctl(kvm->vm_fd, KVM_S390_INTERRUPT, kvmint);
+}
+
+int kvm_s390_set_initial_psw(kvm_context_t kvm, int slot, psw_t psw)
+{
+ return ioctl(kvm->vcpu_fd[slot], KVM_S390_SET_INITIAL_PSW, &psw);
+}
+
+int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr)
+{
+ return ioctl(kvm->vcpu_fd[slot], KVM_S390_STORE_STATUS, addr);
+}
diff --git a/kvm/libkvm/libkvm-x86.c b/kvm/libkvm/libkvm-x86.c
new file mode 100644
index 000000000..f1aef7617
--- /dev/null
+++ b/kvm/libkvm/libkvm-x86.c
@@ -0,0 +1,676 @@
+#include "libkvm.h"
+#include "kvm-x86.h"
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+
+int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
+{
+#ifdef KVM_CAP_SET_TSS_ADDR
+ int r;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+ if (r > 0) {
+ r = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, addr);
+ if (r == -1) {
+ fprintf(stderr, "kvm_set_tss_addr: %m\n");
+ return -errno;
+ }
+ return 0;
+ }
+#endif
+ return -ENOSYS;
+}
+
+static int kvm_init_tss(kvm_context_t kvm)
+{
+#ifdef KVM_CAP_SET_TSS_ADDR
+ int r;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+ if (r > 0) {
+ /*
+ * this address is 3 pages before the bios, and the bios should
+ * present it as unavailable memory
+ */
+ r = kvm_set_tss_addr(kvm, 0xfffbd000);
+ if (r < 0) {
+ fprintf(stderr, "kvm_init_tss: unable to set tss addr\n");
+ return r;
+ }
+
+ }
+#endif
+ return 0;
+}
+
+static int kvm_create_pit(kvm_context_t kvm)
+{
+#ifdef KVM_CAP_PIT
+ int r;
+
+ kvm->pit_in_kernel = 0;
+ if (!kvm->no_pit_creation) {
+#ifdef KVM_CAP_PIT2
+ struct kvm_pit_config config = { .flags = 0 };
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT2);
+ if (r > 0)
+ r = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &config);
+ else
+#endif
+ {
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
+ if (r <= 0)
+ return 0;
+
+ r = ioctl(kvm->vm_fd, KVM_CREATE_PIT);
+ }
+ if (r < 0) {
+ fprintf(stderr, "Create kernel PIC irqchip failed\n");
+ return r;
+ }
+ kvm->pit_in_kernel = 1;
+ }
+#endif
+ return 0;
+}
+
+int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
+ void **vm_mem)
+{
+ int r = 0;
+
+ r = kvm_init_tss(kvm);
+ if (r < 0)
+ return r;
+
+ r = kvm_create_pit(kvm);
+ if (r < 0)
+ return r;
+
+ r = kvm_init_coalesced_mmio(kvm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+#ifdef KVM_EXIT_TPR_ACCESS
+
+static int handle_tpr_access(kvm_context_t kvm, struct kvm_run *run, int vcpu)
+{
+ return kvm->callbacks->tpr_access(kvm->opaque, vcpu,
+ run->tpr_access.rip,
+ run->tpr_access.is_write);
+}
+
+
+int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic)
+{
+ int r;
+ struct kvm_vapic_addr va = {
+ .vapic_addr = vapic,
+ };
+
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_VAPIC_ADDR, &va);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_enable_vapic");
+ return r;
+ }
+ return 0;
+}
+
+#endif
+
+int kvm_arch_run(struct kvm_run *run, kvm_context_t kvm, int vcpu)
+{
+ int r = 0;
+
+ switch (run->exit_reason) {
+#ifdef KVM_EXIT_SET_TPR
+ case KVM_EXIT_SET_TPR:
+ break;
+#endif
+#ifdef KVM_EXIT_TPR_ACCESS
+ case KVM_EXIT_TPR_ACCESS:
+ r = handle_tpr_access(kvm, run, vcpu);
+ break;
+#endif
+ default:
+ r = 1;
+ break;
+ }
+
+ return r;
+}
+
+#define MAX_ALIAS_SLOTS 4
+static struct {
+ uint64_t start;
+ uint64_t len;
+} kvm_aliases[MAX_ALIAS_SLOTS];
+
+static int get_alias_slot(uint64_t start)
+{
+ int i;
+
+ for (i=0; i<MAX_ALIAS_SLOTS; i++)
+ if (kvm_aliases[i].start == start)
+ return i;
+ return -1;
+}
+static int get_free_alias_slot(void)
+{
+ int i;
+
+ for (i=0; i<MAX_ALIAS_SLOTS; i++)
+ if (kvm_aliases[i].len == 0)
+ return i;
+ return -1;
+}
+
+static void register_alias(int slot, uint64_t start, uint64_t len)
+{
+ kvm_aliases[slot].start = start;
+ kvm_aliases[slot].len = len;
+}
+
+int kvm_create_memory_alias(kvm_context_t kvm,
+ uint64_t phys_start,
+ uint64_t len,
+ uint64_t target_phys)
+{
+ struct kvm_memory_alias alias = {
+ .flags = 0,
+ .guest_phys_addr = phys_start,
+ .memory_size = len,
+ .target_phys_addr = target_phys,
+ };
+ int fd = kvm->vm_fd;
+ int r;
+ int slot;
+
+ slot = get_alias_slot(phys_start);
+ if (slot < 0)
+ slot = get_free_alias_slot();
+ if (slot < 0)
+ return -EBUSY;
+ alias.slot = slot;
+
+ r = ioctl(fd, KVM_SET_MEMORY_ALIAS, &alias);
+ if (r == -1)
+ return -errno;
+
+ register_alias(slot, phys_start, len);
+ return 0;
+}
+
+int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
+{
+ return kvm_create_memory_alias(kvm, phys_start, 0, 0);
+}
+
+#ifdef KVM_CAP_IRQCHIP
+
+int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
+{
+ int r;
+ if (!kvm->irqchip_in_kernel)
+ return 0;
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_LAPIC, s);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_get_lapic");
+ }
+ return r;
+}
+
+int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s)
+{
+ int r;
+ if (!kvm->irqchip_in_kernel)
+ return 0;
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_LAPIC, s);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_set_lapic");
+ }
+ return r;
+}
+
+#endif
+
+#ifdef KVM_CAP_PIT
+
+int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
+{
+ int r;
+ if (!kvm->pit_in_kernel)
+ return 0;
+ r = ioctl(kvm->vm_fd, KVM_GET_PIT, s);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_get_pit");
+ }
+ return r;
+}
+
+int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
+{
+ int r;
+ if (!kvm->pit_in_kernel)
+ return 0;
+ r = ioctl(kvm->vm_fd, KVM_SET_PIT, s);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_set_pit");
+ }
+ return r;
+}
+
+#endif
+
+void kvm_show_code(kvm_context_t kvm, int vcpu)
+{
+#define SHOW_CODE_LEN 50
+ int fd = kvm->vcpu_fd[vcpu];
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ int r, n;
+ int back_offset;
+ unsigned char code;
+ char code_str[SHOW_CODE_LEN * 3 + 1];
+ unsigned long rip;
+
+ r = ioctl(fd, KVM_GET_SREGS, &sregs);
+ if (r == -1) {
+ perror("KVM_GET_SREGS");
+ return;
+ }
+ r = ioctl(fd, KVM_GET_REGS, &regs);
+ if (r == -1) {
+ perror("KVM_GET_REGS");
+ return;
+ }
+ rip = sregs.cs.base + regs.rip;
+ back_offset = regs.rip;
+ if (back_offset > 20)
+ back_offset = 20;
+ *code_str = 0;
+ for (n = -back_offset; n < SHOW_CODE_LEN-back_offset; ++n) {
+ if (n == 0)
+ strcat(code_str, " -->");
+ r = kvm->callbacks->mmio_read(kvm->opaque, rip + n, &code, 1);
+ if (r < 0) {
+ strcat(code_str, " xx");
+ continue;
+ }
+ sprintf(code_str + strlen(code_str), " %02x", code);
+ }
+ fprintf(stderr, "code:%s\n", code_str);
+}
+
+
+/*
+ * Returns the available msr list.  The caller must free() the result.
+ */
+struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
+{
+ struct kvm_msr_list sizer, *msrs;
+ int r, e;
+
+ sizer.nmsrs = 0;
+ r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
+ if (r == -1 && errno != E2BIG)
+ return NULL;
+ msrs = malloc(sizeof *msrs + sizer.nmsrs * sizeof *msrs->indices);
+ if (!msrs) {
+ errno = ENOMEM;
+ return NULL;
+ }
+ msrs->nmsrs = sizer.nmsrs;
+ r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
+ if (r == -1) {
+ e = errno;
+ free(msrs);
+ errno = e;
+ return NULL;
+ }
+ return msrs;
+}
+
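
As the comment above says, the caller owns the returned buffer; a compact sketch of the expected pattern, using this file's existing includes (illustration only, not part of the patch):

    static void dump_msr_indices(kvm_context_t kvm)
    {
            struct kvm_msr_list *list = kvm_get_msr_list(kvm);
            unsigned int i;

            if (!list)
                    return;
            for (i = 0; i < list->nmsrs; ++i)
                    fprintf(stderr, "msr[%u] = 0x%x\n", i, list->indices[i]);
            free(list);                       /* kvm_get_msr_list() result must be freed */
    }
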
+int kvm_get_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
+ int n)
+{
+ struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
+ int r, e;
+
+ if (!kmsrs) {
+ errno = ENOMEM;
+ return -1;
+ }
+ kmsrs->nmsrs = n;
+ memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MSRS, kmsrs);
+ e = errno;
+ memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
+ free(kmsrs);
+ errno = e;
+ return r;
+}
+
+int kvm_set_msrs(kvm_context_t kvm, int vcpu, struct kvm_msr_entry *msrs,
+ int n)
+{
+ struct kvm_msrs *kmsrs = malloc(sizeof *kmsrs + n * sizeof *msrs);
+ int r, e;
+
+ if (!kmsrs) {
+ errno = ENOMEM;
+ return -1;
+ }
+ kmsrs->nmsrs = n;
+ memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MSRS, kmsrs);
+ e = errno;
+ free(kmsrs);
+ errno = e;
+ return r;
+}
+
+static void print_seg(FILE *file, const char *name, struct kvm_segment *seg)
+{
+ fprintf(file,
+ "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
+ " g %d avl %d)\n",
+ name, seg->selector, seg->base, seg->limit, seg->present,
+ seg->dpl, seg->db, seg->s, seg->type, seg->l, seg->g,
+ seg->avl);
+}
+
+static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
+{
+ fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
+}
+
+void kvm_show_regs(kvm_context_t kvm, int vcpu)
+{
+ int fd = kvm->vcpu_fd[vcpu];
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ int r;
+
+ r = ioctl(fd, KVM_GET_REGS, &regs);
+ if (r == -1) {
+ perror("KVM_GET_REGS");
+ return;
+ }
+ fprintf(stderr,
+ "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
+ "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
+ "r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
+ "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
+ "rip %016llx rflags %08llx\n",
+ regs.rax, regs.rbx, regs.rcx, regs.rdx,
+ regs.rsi, regs.rdi, regs.rsp, regs.rbp,
+ regs.r8, regs.r9, regs.r10, regs.r11,
+ regs.r12, regs.r13, regs.r14, regs.r15,
+ regs.rip, regs.rflags);
+ r = ioctl(fd, KVM_GET_SREGS, &sregs);
+ if (r == -1) {
+ perror("KVM_GET_SREGS");
+ return;
+ }
+ print_seg(stderr, "cs", &sregs.cs);
+ print_seg(stderr, "ds", &sregs.ds);
+ print_seg(stderr, "es", &sregs.es);
+ print_seg(stderr, "ss", &sregs.ss);
+ print_seg(stderr, "fs", &sregs.fs);
+ print_seg(stderr, "gs", &sregs.gs);
+ print_seg(stderr, "tr", &sregs.tr);
+ print_seg(stderr, "ldt", &sregs.ldt);
+ print_dt(stderr, "gdt", &sregs.gdt);
+ print_dt(stderr, "idt", &sregs.idt);
+ fprintf(stderr, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
+ " efer %llx\n",
+ sregs.cr0, sregs.cr2, sregs.cr3, sregs.cr4, sregs.cr8,
+ sregs.efer);
+}
+
+uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu)
+{
+ struct kvm_run *run = kvm->run[vcpu];
+
+ return run->apic_base;
+}
+
+void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8)
+{
+ struct kvm_run *run = kvm->run[vcpu];
+
+ run->cr8 = cr8;
+}
+
+__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu)
+{
+ return kvm->run[vcpu]->cr8;
+}
+
+int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
+ struct kvm_cpuid_entry *entries)
+{
+ struct kvm_cpuid *cpuid;
+ int r;
+
+ cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
+ if (!cpuid)
+ return -ENOMEM;
+
+ cpuid->nent = nent;
+ memcpy(cpuid->entries, entries, nent * sizeof(*entries));
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID, cpuid);
+
+ free(cpuid);
+ return r;
+}
+
+int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
+ struct kvm_cpuid_entry2 *entries)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r;
+
+ cpuid = malloc(sizeof(*cpuid) + nent * sizeof(*entries));
+ if (!cpuid)
+ return -ENOMEM;
+
+ cpuid->nent = nent;
+ memcpy(cpuid->entries, entries, nent * sizeof(*entries));
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_CPUID2, cpuid);
+ if (r == -1) {
+ fprintf(stderr, "kvm_setup_cpuid2: %m\n");
+ r = -errno;
+ }
+ free(cpuid);
+ return r;
+}
+
+int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
+{
+#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
+ int r;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
+ if (r > 0) {
+ r = ioctl(kvm->vm_fd, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
+ if (r == -1) {
+ fprintf(stderr, "kvm_set_shadow_pages: %m\n");
+ return -errno;
+ }
+ return 0;
+ }
+#endif
+ return -1;
+}
+
+int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
+{
+#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
+ int r;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
+ if (r > 0) {
+ *nrshadow_pages = ioctl(kvm->vm_fd, KVM_GET_NR_MMU_PAGES);
+ return 0;
+ }
+#endif
+ return -1;
+}
+
+#ifdef KVM_CAP_VAPIC
+
+static int tpr_access_reporting(kvm_context_t kvm, int vcpu, int enabled)
+{
+ int r;
+ struct kvm_tpr_access_ctl tac = {
+ .enabled = enabled,
+ };
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
+ if (r == -1 || r == 0)
+ return -ENOSYS;
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_TPR_ACCESS_REPORTING, &tac);
+ if (r == -1) {
+ r = -errno;
+ perror("KVM_TPR_ACCESS_REPORTING");
+ return r;
+ }
+ return 0;
+}
+
+int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
+{
+ return tpr_access_reporting(kvm, vcpu, 1);
+}
+
+int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu)
+{
+ return tpr_access_reporting(kvm, vcpu, 0);
+}
+
+#endif
+
+#ifdef KVM_CAP_EXT_CPUID
+
+static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r, size;
+
+ size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+ cpuid = (struct kvm_cpuid2 *)malloc(size);
+ cpuid->nent = max;
+ r = ioctl(kvm->fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+ if (r == -1)
+ r = -errno;
+ else if (r == 0 && cpuid->nent >= max)
+ r = -E2BIG;
+ if (r < 0) {
+ if (r == -E2BIG) {
+ free(cpuid);
+ return NULL;
+ } else {
+ fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
+ strerror(-r));
+ exit(1);
+ }
+ }
+ return cpuid;
+}
+
+#define R_EAX 0
+#define R_ECX 1
+#define R_EDX 2
+#define R_EBX 3
+#define R_ESP 4
+#define R_EBP 5
+#define R_ESI 6
+#define R_EDI 7
+
+uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
+{
+ struct kvm_cpuid2 *cpuid;
+ int i, max;
+ uint32_t ret = 0;
+ uint32_t cpuid_1_edx;
+
+ if (!kvm_check_extension(kvm, KVM_CAP_EXT_CPUID)) {
+ return -1U;
+ }
+
+ max = 1;
+ while ((cpuid = try_get_cpuid(kvm, max)) == NULL) {
+ max *= 2;
+ }
+
+ for (i = 0; i < cpuid->nent; ++i) {
+ if (cpuid->entries[i].function == function) {
+ switch (reg) {
+ case R_EAX:
+ ret = cpuid->entries[i].eax;
+ break;
+ case R_EBX:
+ ret = cpuid->entries[i].ebx;
+ break;
+ case R_ECX:
+ ret = cpuid->entries[i].ecx;
+ break;
+ case R_EDX:
+ ret = cpuid->entries[i].edx;
+ if (function == 1) {
+ /* kvm misreports the following features
+ */
+ ret |= 1 << 12; /* MTRR */
+ ret |= 1 << 16; /* PAT */
+ ret |= 1 << 7; /* MCE */
+ ret |= 1 << 14; /* MCA */
+ }
+
+ /* On Intel, kvm returns cpuid according to
+ * the Intel spec, so add missing bits
+ * according to the AMD spec:
+ */
+ if (function == 0x80000001) {
+ cpuid_1_edx = kvm_get_supported_cpuid(kvm, 1, R_EDX);
+ ret |= cpuid_1_edx & 0xdfeff7ff;
+ }
+ break;
+ }
+ }
+ }
+
+ free(cpuid);
+
+ return ret;
+}
+
+#else
+
+uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
+{
+ return -1U;
+}
+
+#endif
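
A sketch of querying a single feature bit through kvm_get_supported_cpuid() (illustration only, not part of the patch; it assumes the prototype is exported via libkvm.h). The bit chosen, CPUID.1:EDX bit 5 (MSR instructions), is arbitrary, and 2 mirrors the R_EDX register index defined in this file.

    #include <stdint.h>
    #include "libkvm.h"

    static int host_kvm_reports_msr_support(kvm_context_t kvm)
    {
            /* 2 == R_EDX, mirroring the register indices defined above */
            uint32_t edx = kvm_get_supported_cpuid(kvm, 1, 2);

            if (edx == -1U)                   /* KVM_CAP_EXT_CPUID not available */
                    return 0;
            return (edx >> 5) & 1;            /* CPUID.1:EDX bit 5: MSR instructions */
    }
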
diff --git a/kvm/libkvm/libkvm.c b/kvm/libkvm/libkvm.c
new file mode 100644
index 000000000..c5d6a7f5e
--- /dev/null
+++ b/kvm/libkvm/libkvm.c
@@ -0,0 +1,1497 @@
+/*
+ * Kernel-based Virtual Machine control library
+ *
+ * This library provides an API to control the kvm hardware virtualization
+ * module.
+ *
+ * Copyright (C) 2006 Qumranet
+ *
+ * Authors:
+ *
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * This work is licensed under the GNU LGPL license, version 2.
+ */
+
+#ifndef __user
+#define __user /* temporary, until installed via make headers_install */
+#endif
+
+#include <linux/kvm.h>
+
+#define EXPECTED_KVM_API_VERSION 12
+
+#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
+#error libkvm: userspace and kernel version mismatch
+#endif
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <inttypes.h>
+#include "libkvm.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+#include "kvm-x86.h"
+#endif
+
+#if defined(__ia64__)
+#include "kvm-ia64.h"
+#endif
+
+#if defined(__powerpc__)
+#include "kvm-powerpc.h"
+#endif
+
+#if defined(__s390__)
+#include "kvm-s390.h"
+#endif
+
+//#define DEBUG_MEMREG
+#ifdef DEBUG_MEMREG
+#define DPRINTF(fmt, args...) \
+ do { fprintf(stderr, "%s:%d " fmt , __func__, __LINE__, ##args); } while (0)
+#else
+#define DPRINTF(fmt, args...) do {} while (0)
+#endif
+
+#define MIN(x,y) ((x) < (y) ? (x) : (y))
+#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
+
+int kvm_abi = EXPECTED_KVM_API_VERSION;
+int kvm_page_size;
+
+static inline void set_gsi(kvm_context_t kvm, unsigned int gsi)
+{
+ uint32_t *bitmap = kvm->used_gsi_bitmap;
+
+ if (gsi < kvm->max_gsi)
+ bitmap[gsi / 32] |= 1U << (gsi % 32);
+ else
+ DPRINTF("Invalid GSI %d\n");
+}
+
+static inline void clear_gsi(kvm_context_t kvm, unsigned int gsi)
+{
+ uint32_t *bitmap = kvm->used_gsi_bitmap;
+
+ if (gsi < kvm->max_gsi)
+ bitmap[gsi / 32] &= ~(1U << (gsi % 32));
+ else
+ DPRINTF("Invalid GSI %d\n");
+}
+
+struct slot_info {
+ unsigned long phys_addr;
+ unsigned long len;
+ unsigned long userspace_addr;
+ unsigned flags;
+ int logging_count;
+};
+
+struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];
+
+static void init_slots(void)
+{
+ int i;
+
+ for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
+ slots[i].len = 0;
+}
+
+static int get_free_slot(kvm_context_t kvm)
+{
+ int i;
+ int tss_ext;
+
+#if defined(KVM_CAP_SET_TSS_ADDR) && !defined(__s390__)
+ tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+#else
+ tss_ext = 0;
+#endif
+
+ /*
+ * on older kernels where the set tss ioctl is not supported we must
+ * reserve slot 0 to hold the extended memory, as vmx will use the
+ * last 3 pages of this slot.
+ */
+ if (tss_ext > 0)
+ i = 0;
+ else
+ i = 1;
+
+ for (; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
+ if (!slots[i].len)
+ return i;
+ return -1;
+}
+
+static void register_slot(int slot, unsigned long phys_addr, unsigned long len,
+ unsigned long userspace_addr, unsigned flags)
+{
+ slots[slot].phys_addr = phys_addr;
+ slots[slot].len = len;
+ slots[slot].userspace_addr = userspace_addr;
+ slots[slot].flags = flags;
+}
+
+static void free_slot(int slot)
+{
+ slots[slot].len = 0;
+ slots[slot].logging_count = 0;
+}
+
+static int get_slot(unsigned long phys_addr)
+{
+ int i;
+
+ for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS ; ++i) {
+ if (slots[i].len && slots[i].phys_addr <= phys_addr &&
+ (slots[i].phys_addr + slots[i].len-1) >= phys_addr)
+ return i;
+ }
+ return -1;
+}
+
+/* Returns -1 if this slot is not totally contained on any other,
+ * and the number of the slot otherwise */
+static int get_container_slot(uint64_t phys_addr, unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS ; ++i)
+ if (slots[i].len && slots[i].phys_addr <= phys_addr &&
+ (slots[i].phys_addr + slots[i].len) >= phys_addr + size)
+ return i;
+ return -1;
+}
+
+int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigned long size)
+{
+ int slot = get_container_slot(phys_addr, size);
+ if (slot == -1)
+ return 0;
+ return 1;
+}
+
+/*
+ * dirty pages logging control
+ */
+static int kvm_dirty_pages_log_change(kvm_context_t kvm,
+ unsigned long phys_addr,
+ unsigned flags,
+ unsigned mask)
+{
+ int r = -1;
+ int slot = get_slot(phys_addr);
+
+ if (slot == -1) {
+ fprintf(stderr, "BUG: %s: invalid parameters\n", __FUNCTION__);
+ return 1;
+ }
+
+ flags = (slots[slot].flags & ~mask) | flags;
+ if (flags == slots[slot].flags)
+ return 0;
+ slots[slot].flags = flags;
+
+ {
+ struct kvm_userspace_memory_region mem = {
+ .slot = slot,
+ .memory_size = slots[slot].len,
+ .guest_phys_addr = slots[slot].phys_addr,
+ .userspace_addr = slots[slot].userspace_addr,
+ .flags = slots[slot].flags,
+ };
+
+
+ DPRINTF("slot %d start %llx len %llx flags %x\n",
+ mem.slot,
+ mem.guest_phys_addr,
+ mem.memory_size,
+ mem.flags);
+ r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
+ if (r == -1)
+ fprintf(stderr, "%s: %m\n", __FUNCTION__);
+ }
+ return r;
+}
+
+static int kvm_dirty_pages_log_change_all(kvm_context_t kvm,
+ int (*change)(kvm_context_t kvm,
+ uint64_t start,
+ uint64_t len))
+{
+ int i, r;
+
+ for (i=r=0; i<KVM_MAX_NUM_MEM_REGIONS && r==0; i++) {
+ if (slots[i].len)
+ r = change(kvm, slots[i].phys_addr, slots[i].len);
+ }
+ return r;
+}
+
+int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
+ uint64_t phys_addr,
+ uint64_t len)
+{
+ int slot = get_slot(phys_addr);
+
+ DPRINTF("start %"PRIx64" len %"PRIx64"\n", phys_addr, len);
+ if (slot == -1) {
+ fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ if (slots[slot].logging_count++)
+ return 0;
+
+ return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr,
+ KVM_MEM_LOG_DIRTY_PAGES,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
+ uint64_t phys_addr,
+ uint64_t len)
+{
+ int slot = get_slot(phys_addr);
+
+ if (slot == -1) {
+ fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ if (--slots[slot].logging_count)
+ return 0;
+
+ return kvm_dirty_pages_log_change(kvm, slots[slot].phys_addr,
+ 0,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+/**
+ * Enable dirty page logging for all memory regions
+ */
+int kvm_dirty_pages_log_enable_all(kvm_context_t kvm)
+{
+ if (kvm->dirty_pages_log_all)
+ return 0;
+ kvm->dirty_pages_log_all = 1;
+ return kvm_dirty_pages_log_change_all(kvm,
+ kvm_dirty_pages_log_enable_slot);
+}
+
+/**
+ * Enable dirty page logging only for memory regions that were created with
+ * dirty logging enabled (disable for all other memory regions).
+ */
+int kvm_dirty_pages_log_reset(kvm_context_t kvm)
+{
+ if (!kvm->dirty_pages_log_all)
+ return 0;
+ kvm->dirty_pages_log_all = 0;
+ return kvm_dirty_pages_log_change_all(kvm,
+ kvm_dirty_pages_log_disable_slot);
+}
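
The enable_all/reset pair above flips KVM_MEM_LOG_DIRTY_PAGES on every registered slot and back; a sketch of harvesting one slot's bitmap while logging is forced on (illustration only, not part of the patch; sizing `bitmap` at one bit per guest page is the caller's job):

    static int snapshot_dirty_bitmap(kvm_context_t kvm,
                                     unsigned long phys_addr, void *bitmap)
    {
            int r = kvm_dirty_pages_log_enable_all(kvm);

            if (r)
                    return r;
            /* kvm_get_dirty_pages() (defined later in this file) fills the
             * bitmap for the slot containing phys_addr */
            r = kvm_get_dirty_pages(kvm, phys_addr, bitmap);
            kvm_dirty_pages_log_reset(kvm);
            return r;
    }
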
+
+
+kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
+ void *opaque)
+{
+ int fd;
+ kvm_context_t kvm;
+ int r, gsi_count;
+
+ fd = open("/dev/kvm", O_RDWR);
+ if (fd == -1) {
+ perror("open /dev/kvm");
+ return NULL;
+ }
+ r = ioctl(fd, KVM_GET_API_VERSION, 0);
+ if (r == -1) {
+ fprintf(stderr, "kvm kernel version too old: "
+ "KVM_GET_API_VERSION ioctl not supported\n");
+ goto out_close;
+ }
+ if (r < EXPECTED_KVM_API_VERSION) {
+ fprintf(stderr, "kvm kernel version too old: "
+ "We expect API version %d or newer, but got "
+ "version %d\n",
+ EXPECTED_KVM_API_VERSION, r);
+ goto out_close;
+ }
+ if (r > EXPECTED_KVM_API_VERSION) {
+ fprintf(stderr, "kvm userspace version too old\n");
+ goto out_close;
+ }
+ kvm_abi = r;
+ kvm_page_size = getpagesize();
+ kvm = malloc(sizeof(*kvm));
+ if (kvm == NULL)
+ goto out_close;
+ memset(kvm, 0, sizeof(*kvm));
+ kvm->fd = fd;
+ kvm->vm_fd = -1;
+ kvm->callbacks = callbacks;
+ kvm->opaque = opaque;
+ kvm->dirty_pages_log_all = 0;
+ kvm->no_irqchip_creation = 0;
+ kvm->no_pit_creation = 0;
+
+ gsi_count = kvm_get_gsi_count(kvm);
+ if (gsi_count > 0) {
+ int gsi_bits, i;
+
+ /* Round up so we can search ints using ffs */
+ gsi_bits = ALIGN(gsi_count, 32);
+ kvm->used_gsi_bitmap = malloc(gsi_bits / 8);
+ if (!kvm->used_gsi_bitmap)
+ goto out_close;
+ memset(kvm->used_gsi_bitmap, 0, gsi_bits / 8);
+ kvm->max_gsi = gsi_bits;
+
+ /* Mark any over-allocated bits as already in use */
+ for (i = gsi_count; i < gsi_bits; i++)
+ set_gsi(kvm, i);
+ }
+
+ return kvm;
+ out_close:
+ close(fd);
+ return NULL;
+}
+
+void kvm_finalize(kvm_context_t kvm)
+{
+ if (kvm->vcpu_fd[0] != -1)
+ close(kvm->vcpu_fd[0]);
+ if (kvm->vm_fd != -1)
+ close(kvm->vm_fd);
+ close(kvm->fd);
+ free(kvm);
+}
+
+void kvm_disable_irqchip_creation(kvm_context_t kvm)
+{
+ kvm->no_irqchip_creation = 1;
+}
+
+void kvm_disable_pit_creation(kvm_context_t kvm)
+{
+ kvm->no_pit_creation = 1;
+}
+
+int kvm_create_vcpu(kvm_context_t kvm, int slot)
+{
+ long mmap_size;
+ int r;
+
+ r = ioctl(kvm->vm_fd, KVM_CREATE_VCPU, slot);
+ if (r == -1) {
+ r = -errno;
+ fprintf(stderr, "kvm_create_vcpu: %m\n");
+ return r;
+ }
+ kvm->vcpu_fd[slot] = r;
+ mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
+ if (mmap_size == -1) {
+ r = -errno;
+ fprintf(stderr, "get vcpu mmap size: %m\n");
+ return r;
+ }
+ kvm->run[slot] = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
+ kvm->vcpu_fd[slot], 0);
+ if (kvm->run[slot] == MAP_FAILED) {
+ r = -errno;
+ fprintf(stderr, "mmap vcpu area: %m\n");
+ return r;
+ }
+ return 0;
+}
+
+int kvm_create_vm(kvm_context_t kvm)
+{
+ int fd = kvm->fd;
+
+#ifdef KVM_CAP_IRQ_ROUTING
+ kvm->irq_routes = malloc(sizeof(*kvm->irq_routes));
+ if (!kvm->irq_routes)
+ return -ENOMEM;
+ memset(kvm->irq_routes, 0, sizeof(*kvm->irq_routes));
+ kvm->nr_allocated_irq_routes = 0;
+#endif
+
+ kvm->vcpu_fd[0] = -1;
+
+ fd = ioctl(fd, KVM_CREATE_VM, 0);
+ if (fd == -1) {
+ fprintf(stderr, "kvm_create_vm: %m\n");
+ return -1;
+ }
+ kvm->vm_fd = fd;
+ return 0;
+}
+
+static int kvm_create_default_phys_mem(kvm_context_t kvm,
+ unsigned long phys_mem_bytes,
+ void **vm_mem)
+{
+#ifdef KVM_CAP_USER_MEMORY
+ int r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
+ if (r > 0)
+ return 0;
+ fprintf(stderr, "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
+#else
+#error Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported
+#endif
+ return -1;
+}
+
+int kvm_check_extension(kvm_context_t kvm, int ext)
+{
+ int ret;
+
+ ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION, ext);
+ if (ret > 0)
+ return ret;
+ return 0;
+}
+
+void kvm_create_irqchip(kvm_context_t kvm)
+{
+ int r;
+
+ kvm->irqchip_in_kernel = 0;
+#ifdef KVM_CAP_IRQCHIP
+ if (!kvm->no_irqchip_creation) {
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
+ if (r > 0) { /* kernel irqchip supported */
+ r = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
+ if (r >= 0) {
+ kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
+#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ KVM_CAP_IRQ_INJECT_STATUS);
+ if (r > 0)
+ kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
+#endif
+ kvm->irqchip_in_kernel = 1;
+ }
+ else
+ fprintf(stderr, "Create kernel PIC irqchip failed\n");
+ }
+ }
+#endif
+}
+
+int kvm_create(kvm_context_t kvm, unsigned long phys_mem_bytes, void **vm_mem)
+{
+ int r;
+
+ r = kvm_create_vm(kvm);
+ if (r < 0)
+ return r;
+ r = kvm_arch_create(kvm, phys_mem_bytes, vm_mem);
+ if (r < 0)
+ return r;
+ init_slots();
+ r = kvm_create_default_phys_mem(kvm, phys_mem_bytes, vm_mem);
+ if (r < 0)
+ return r;
+ kvm_create_irqchip(kvm);
+
+ return 0;
+}
+
+
+void *kvm_create_phys_mem(kvm_context_t kvm, unsigned long phys_start,
+ unsigned long len, int log, int writable)
+{
+ int r;
+ int prot = PROT_READ;
+ void *ptr;
+ struct kvm_userspace_memory_region memory = {
+ .memory_size = len,
+ .guest_phys_addr = phys_start,
+ .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
+ };
+
+ if (writable)
+ prot |= PROT_WRITE;
+
+#if !defined(__s390__)
+ ptr = mmap(NULL, len, prot, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
+#else
+ ptr = mmap(LIBKVM_S390_ORIGIN, len, prot | PROT_EXEC,
+ MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#endif
+ if (ptr == MAP_FAILED) {
+ fprintf(stderr, "%s: %s", __func__, strerror(errno));
+ return 0;
+ }
+
+ memset(ptr, 0, len);
+
+ memory.userspace_addr = (unsigned long)ptr;
+ memory.slot = get_free_slot(kvm);
+ DPRINTF("slot %d start %llx len %llx flags %x\n",
+ memory.slot,
+ memory.guest_phys_addr,
+ memory.memory_size,
+ memory.flags);
+ r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
+ if (r == -1) {
+ fprintf(stderr, "%s: %s", __func__, strerror(errno));
+ return 0;
+ }
+ register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
+ memory.userspace_addr, memory.flags);
+
+ return ptr;
+}
+
+int kvm_register_phys_mem(kvm_context_t kvm,
+ unsigned long phys_start, void *userspace_addr,
+ unsigned long len, int log)
+{
+
+ struct kvm_userspace_memory_region memory = {
+ .memory_size = len,
+ .guest_phys_addr = phys_start,
+ .userspace_addr = (unsigned long)(intptr_t)userspace_addr,
+ .flags = log ? KVM_MEM_LOG_DIRTY_PAGES : 0,
+ };
+ int r;
+
+ memory.slot = get_free_slot(kvm);
+ DPRINTF("memory: gpa: %llx, size: %llx, uaddr: %llx, slot: %x, flags: %lx\n",
+ memory.guest_phys_addr, memory.memory_size,
+ memory.userspace_addr, memory.slot, memory.flags);
+ r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
+ if (r == -1) {
+ fprintf(stderr, "create_userspace_phys_mem: %s\n", strerror(errno));
+ return -1;
+ }
+ register_slot(memory.slot, memory.guest_phys_addr, memory.memory_size,
+ memory.userspace_addr, memory.flags);
+ return 0;
+}
+
+
+/* destroy/free a whole slot.
+ * phys_start, len and slot are the params passed to kvm_create_phys_mem()
+ */
+void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
+ unsigned long len)
+{
+ int slot;
+ int r;
+ struct kvm_userspace_memory_region memory = {
+ .memory_size = 0,
+ .guest_phys_addr = phys_start,
+ .userspace_addr = 0,
+ .flags = 0,
+ };
+
+ slot = get_slot(phys_start);
+
+ if ((slot >= KVM_MAX_NUM_MEM_REGIONS) || (slot == -1)) {
+ fprintf(stderr, "BUG: %s: invalid parameters (slot=%d)\n",
+ __FUNCTION__, slot);
+ return;
+ }
+ if (phys_start != slots[slot].phys_addr) {
+ fprintf(stderr,
+ "WARNING: %s: phys_start is 0x%lx expecting 0x%lx\n",
+ __FUNCTION__, phys_start, slots[slot].phys_addr);
+ phys_start = slots[slot].phys_addr;
+ }
+
+ memory.slot = slot;
+ DPRINTF("slot %d start %llx len %llx flags %x\n",
+ memory.slot,
+ memory.guest_phys_addr,
+ memory.memory_size,
+ memory.flags);
+ r = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &memory);
+ if (r == -1) {
+ fprintf(stderr, "destroy_userspace_phys_mem: %s",
+ strerror(errno));
+ return;
+ }
+
+ free_slot(memory.slot);
+}
+
+void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
+{
+
+ int slot = get_container_slot(phys_addr, size);
+
+ if (slot != -1) {
+ DPRINTF("Unregistering memory region %llx (%lx)\n", phys_addr, size);
+ kvm_destroy_phys_mem(kvm, phys_addr, size);
+ return;
+ }
+}
+
+static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
+{
+ int r;
+ struct kvm_dirty_log log = {
+ .slot = slot,
+ };
+
+ log.dirty_bitmap = buf;
+
+ r = ioctl(kvm->vm_fd, ioctl_num, &log);
+ if (r == -1)
+ return -errno;
+ return 0;
+}
+
+int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
+{
+ int slot;
+
+ slot = get_slot(phys_addr);
+ return kvm_get_map(kvm, KVM_GET_DIRTY_LOG, slot, buf);
+}
+
+int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
+ unsigned long len, void *buf, void *opaque,
+ int (*cb)(unsigned long start, unsigned long len,
+ void*bitmap, void *opaque))
+{
+ int i;
+ int r;
+ unsigned long end_addr = phys_addr + len;
+
+ for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
+ if ((slots[i].len && (uint64_t)slots[i].phys_addr >= phys_addr)
+ && ((uint64_t)slots[i].phys_addr + slots[i].len <= end_addr)) {
+ r = kvm_get_map(kvm, KVM_GET_DIRTY_LOG, i, buf);
+ if (r)
+ return r;
+ r = cb(slots[i].phys_addr, slots[i].len, buf, opaque);
+ if (r)
+ return r;
+ }
+ }
+ return 0;
+}
+
+#ifdef KVM_CAP_IRQCHIP
+
+int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status)
+{
+ struct kvm_irq_level event;
+ int r;
+
+ if (!kvm->irqchip_in_kernel)
+ return 0;
+ event.level = level;
+ event.irq = irq;
+ r = ioctl(kvm->vm_fd, kvm->irqchip_inject_ioctl, &event);
+ if (r == -1)
+ perror("kvm_set_irq_level");
+
+ if (status) {
+#ifdef KVM_CAP_IRQ_INJECT_STATUS
+ *status = (kvm->irqchip_inject_ioctl == KVM_IRQ_LINE) ?
+ 1 : event.status;
+#else
+ *status = 1;
+#endif
+ }
+
+ return 1;
+}
+
+int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
+{
+ int r;
+
+ if (!kvm->irqchip_in_kernel)
+ return 0;
+ r = ioctl(kvm->vm_fd, KVM_GET_IRQCHIP, chip);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_get_irqchip\n");
+ }
+ return r;
+}
+
+int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip)
+{
+ int r;
+
+ if (!kvm->irqchip_in_kernel)
+ return 0;
+ r = ioctl(kvm->vm_fd, KVM_SET_IRQCHIP, chip);
+ if (r == -1) {
+ r = -errno;
+ perror("kvm_set_irqchip\n");
+ }
+ return r;
+}
+
+#endif
+
+static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
+{
+ uint16_t addr = run->io.port;
+ int r;
+ int i;
+ void *p = (void *)run + run->io.data_offset;
+
+ for (i = 0; i < run->io.count; ++i) {
+ switch (run->io.direction) {
+ case KVM_EXIT_IO_IN:
+ switch (run->io.size) {
+ case 1:
+ r = kvm->callbacks->inb(kvm->opaque, addr, p);
+ break;
+ case 2:
+ r = kvm->callbacks->inw(kvm->opaque, addr, p);
+ break;
+ case 4:
+ r = kvm->callbacks->inl(kvm->opaque, addr, p);
+ break;
+ default:
+ fprintf(stderr, "bad I/O size %d\n", run->io.size);
+ return -EMSGSIZE;
+ }
+ break;
+ case KVM_EXIT_IO_OUT:
+ switch (run->io.size) {
+ case 1:
+ r = kvm->callbacks->outb(kvm->opaque, addr,
+ *(uint8_t *)p);
+ break;
+ case 2:
+ r = kvm->callbacks->outw(kvm->opaque, addr,
+ *(uint16_t *)p);
+ break;
+ case 4:
+ r = kvm->callbacks->outl(kvm->opaque, addr,
+ *(uint32_t *)p);
+ break;
+ default:
+ fprintf(stderr, "bad I/O size %d\n", run->io.size);
+ return -EMSGSIZE;
+ }
+ break;
+ default:
+ fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
+ return -EPROTO;
+ }
+
+ p += run->io.size;
+ }
+
+ return 0;
+}
+
+int handle_debug(kvm_context_t kvm, int vcpu, void *env)
+{
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+ struct kvm_run *run = kvm->run[vcpu];
+
+ return kvm->callbacks->debug(kvm->opaque, env, &run->debug.arch);
+#else
+ return 0;
+#endif
+}
+
+int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_REGS, regs);
+}
+
+int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_REGS, regs);
+}
+
+int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_FPU, fpu);
+}
+
+int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_FPU, fpu);
+}
+
+int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_SREGS, sregs);
+}
+
+int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *sregs)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SREGS, sregs);
+}
+
+#ifdef KVM_CAP_MP_STATE
+int kvm_get_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
+{
+ int r;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+ if (r > 0)
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_GET_MP_STATE, mp_state);
+ return -ENOSYS;
+}
+
+int kvm_set_mpstate(kvm_context_t kvm, int vcpu, struct kvm_mp_state *mp_state)
+{
+ int r;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+ if (r > 0)
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_MP_STATE, mp_state);
+ return -ENOSYS;
+}
+#endif
+
+static int handle_mmio(kvm_context_t kvm, struct kvm_run *kvm_run)
+{
+ unsigned long addr = kvm_run->mmio.phys_addr;
+ void *data = kvm_run->mmio.data;
+
+ /* hack: Red Hat 7.1 generates these weird accesses. */
+ if ((addr > 0xa0000-4 && addr <= 0xa0000) && kvm_run->mmio.len == 3)
+ return 0;
+
+ if (kvm_run->mmio.is_write)
+ return kvm->callbacks->mmio_write(kvm->opaque, addr, data,
+ kvm_run->mmio.len);
+ else
+ return kvm->callbacks->mmio_read(kvm->opaque, addr, data,
+ kvm_run->mmio.len);
+}
+
+int handle_io_window(kvm_context_t kvm)
+{
+ return kvm->callbacks->io_window(kvm->opaque);
+}
+
+int handle_halt(kvm_context_t kvm, int vcpu)
+{
+ return kvm->callbacks->halt(kvm->opaque, vcpu);
+}
+
+int handle_shutdown(kvm_context_t kvm, void *env)
+{
+ return kvm->callbacks->shutdown(kvm->opaque, env);
+}
+
+int try_push_interrupts(kvm_context_t kvm)
+{
+ return kvm->callbacks->try_push_interrupts(kvm->opaque);
+}
+
+static inline void push_nmi(kvm_context_t kvm)
+{
+#ifdef KVM_CAP_USER_NMI
+ kvm->callbacks->push_nmi(kvm->opaque);
+#endif /* KVM_CAP_USER_NMI */
+}
+
+void post_kvm_run(kvm_context_t kvm, void *env)
+{
+ kvm->callbacks->post_kvm_run(kvm->opaque, env);
+}
+
+int pre_kvm_run(kvm_context_t kvm, void *env)
+{
+ return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
+}
+
+int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
+{
+ struct kvm_run *run = kvm->run[vcpu];
+
+ return run->if_flag;
+}
+
+int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu)
+{
+ struct kvm_run *run = kvm->run[vcpu];
+
+ return run->ready_for_interrupt_injection;
+}
+
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
+{
+ int r;
+ int fd = kvm->vcpu_fd[vcpu];
+ struct kvm_run *run = kvm->run[vcpu];
+
+again:
+ push_nmi(kvm);
+#if !defined(__s390__)
+ if (!kvm->irqchip_in_kernel)
+ run->request_interrupt_window = try_push_interrupts(kvm);
+#endif
+ r = pre_kvm_run(kvm, env);
+ if (r)
+ return r;
+ r = ioctl(fd, KVM_RUN, 0);
+
+ if (r == -1 && errno != EINTR && errno != EAGAIN) {
+ r = -errno;
+ post_kvm_run(kvm, env);
+ fprintf(stderr, "kvm_run: %s\n", strerror(-r));
+ return r;
+ }
+
+ post_kvm_run(kvm, env);
+
+#if defined(KVM_CAP_COALESCED_MMIO)
+ if (kvm->coalesced_mmio) {
+ struct kvm_coalesced_mmio_ring *ring = (void *)run +
+ kvm->coalesced_mmio * PAGE_SIZE;
+ while (ring->first != ring->last) {
+ kvm->callbacks->mmio_write(kvm->opaque,
+ ring->coalesced_mmio[ring->first].phys_addr,
+ &ring->coalesced_mmio[ring->first].data[0],
+ ring->coalesced_mmio[ring->first].len);
+ smp_wmb();
+ ring->first = (ring->first + 1) %
+ KVM_COALESCED_MMIO_MAX;
+ }
+ }
+#endif
+
+#if !defined(__s390__)
+ if (r == -1) {
+ r = handle_io_window(kvm);
+ goto more;
+ }
+#endif
+ if (1) {
+ switch (run->exit_reason) {
+ case KVM_EXIT_UNKNOWN:
+ fprintf(stderr, "unhandled vm exit: 0x%x vcpu_id %d\n",
+ (unsigned)run->hw.hardware_exit_reason, vcpu);
+ kvm_show_regs(kvm, vcpu);
+ abort();
+ break;
+ case KVM_EXIT_FAIL_ENTRY:
+ fprintf(stderr, "kvm_run: failed entry, reason %u\n",
+ (unsigned)run->fail_entry.hardware_entry_failure_reason & 0xffff);
+ kvm_show_regs(kvm, vcpu);
+ return -ENOEXEC;
+ break;
+ case KVM_EXIT_EXCEPTION:
+ fprintf(stderr, "exception %d (%x)\n",
+ run->ex.exception,
+ run->ex.error_code);
+ kvm_show_regs(kvm, vcpu);
+ kvm_show_code(kvm, vcpu);
+ abort();
+ break;
+ case KVM_EXIT_IO:
+ r = handle_io(kvm, run, vcpu);
+ break;
+ case KVM_EXIT_DEBUG:
+ r = handle_debug(kvm, vcpu, env);
+ break;
+ case KVM_EXIT_MMIO:
+ r = handle_mmio(kvm, run);
+ break;
+ case KVM_EXIT_HLT:
+ r = handle_halt(kvm, vcpu);
+ break;
+ case KVM_EXIT_IRQ_WINDOW_OPEN:
+ break;
+ case KVM_EXIT_SHUTDOWN:
+ r = handle_shutdown(kvm, env);
+ break;
+#if defined(__s390__)
+ case KVM_EXIT_S390_SIEIC:
+ r = kvm->callbacks->s390_handle_intercept(kvm, vcpu,
+ run);
+ break;
+ case KVM_EXIT_S390_RESET:
+ r = kvm->callbacks->s390_handle_reset(kvm, vcpu, run);
+ break;
+#endif
+ default:
+ if (kvm_arch_run(run, kvm, vcpu)) {
+ fprintf(stderr, "unhandled vm exit: 0x%x\n",
+ run->exit_reason);
+ kvm_show_regs(kvm, vcpu);
+ abort();
+ }
+ break;
+ }
+ }
+more:
+ if (!r)
+ goto again;
+ return r;
+}
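A minimal usage sketch (illustrative only, not part of this patch) of the loop a vcpu thread typically runs around kvm_run(); struct vcpu_arg and vcpu_env are hypothetical placeholders for whatever per-vcpu state the registered callbacks expect.

    /* Hypothetical per-thread argument. */
    struct vcpu_arg {
        kvm_context_t kvm;
        int slot;
        void *vcpu_env;
    };

    static void *vcpu_thread(void *opaque)
    {
        struct vcpu_arg *arg = opaque;
        int r;

        /* One vcpu per thread; vcpu 0 already exists after kvm_create(). */
        if (arg->slot > 0 && kvm_create_vcpu(arg->kvm, arg->slot) < 0)
            return NULL;

        for (;;) {
            /* kvm_run() loops internally; it only returns on an error or
             * when a callback (io_window, halt, ...) returns non-zero. */
            r = kvm_run(arg->kvm, arg->slot, arg->vcpu_env);
            if (r < 0)
                break;
        }
        return NULL;
    }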
+
+int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq)
+{
+ struct kvm_interrupt intr;
+
+ intr.irq = irq;
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_INTERRUPT, &intr);
+}
+
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+int kvm_set_guest_debug(kvm_context_t kvm, int vcpu, struct kvm_guest_debug *dbg)
+{
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_SET_GUEST_DEBUG, dbg);
+}
+#endif
+
+int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset)
+{
+ struct kvm_signal_mask *sigmask;
+ int r;
+
+ if (!sigset) {
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, NULL);
+ if (r == -1)
+ r = -errno;
+ return r;
+ }
+ sigmask = malloc(sizeof(*sigmask) + sizeof(*sigset));
+ if (!sigmask)
+ return -ENOMEM;
+
+ sigmask->len = 8;
+ memcpy(sigmask->sigset, sigset, sizeof(*sigset));
+ r = ioctl(kvm->vcpu_fd[vcpu], KVM_SET_SIGNAL_MASK, sigmask);
+ if (r == -1)
+ r = -errno;
+ free(sigmask);
+ return r;
+}
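A usage sketch of the pattern this enables, assuming SIGUSR1 as the kick signal and a pthreads-based vcpu thread: the signal stays blocked while the thread runs userspace code, but may interrupt guest execution, so pthread_kill() on the vcpu thread forces KVM_RUN to fail with EINTR.

    #include <signal.h>
    #include <pthread.h>

    static int allow_kick_in_guest(kvm_context_t kvm, int vcpu)
    {
        sigset_t block, guest;

        /* Keep SIGUSR1 blocked while this thread runs userspace code. */
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &block, NULL);

        /* Unblock it only for the duration of KVM_RUN. */
        pthread_sigmask(SIG_SETMASK, NULL, &guest);
        sigdelset(&guest, SIGUSR1);
        return kvm_set_signal_mask(kvm, vcpu, &guest);
    }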
+
+int kvm_irqchip_in_kernel(kvm_context_t kvm)
+{
+ return kvm->irqchip_in_kernel;
+}
+
+int kvm_pit_in_kernel(kvm_context_t kvm)
+{
+ return kvm->pit_in_kernel;
+}
+
+int kvm_has_sync_mmu(kvm_context_t kvm)
+{
+ int r = 0;
+#ifdef KVM_CAP_SYNC_MMU
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
+#endif
+ return r;
+}
+
+int kvm_inject_nmi(kvm_context_t kvm, int vcpu)
+{
+#ifdef KVM_CAP_USER_NMI
+ return ioctl(kvm->vcpu_fd[vcpu], KVM_NMI);
+#else
+ return -ENOSYS;
+#endif
+}
+
+int kvm_init_coalesced_mmio(kvm_context_t kvm)
+{
+ int r = 0;
+ kvm->coalesced_mmio = 0;
+#ifdef KVM_CAP_COALESCED_MMIO
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
+ if (r > 0) {
+ kvm->coalesced_mmio = r;
+ return 0;
+ }
+#endif
+ return r;
+}
+
+int kvm_register_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
+{
+#ifdef KVM_CAP_COALESCED_MMIO
+ struct kvm_coalesced_mmio_zone zone;
+ int r;
+
+ if (kvm->coalesced_mmio) {
+
+ zone.addr = addr;
+ zone.size = size;
+
+ r = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
+ if (r == -1) {
+ perror("kvm_register_coalesced_mmio_zone");
+ return -errno;
+ }
+ return 0;
+ }
+#endif
+ return -ENOSYS;
+}
+
+int kvm_unregister_coalesced_mmio(kvm_context_t kvm, uint64_t addr, uint32_t size)
+{
+#ifdef KVM_CAP_COALESCED_MMIO
+ struct kvm_coalesced_mmio_zone zone;
+ int r;
+
+ if (kvm->coalesced_mmio) {
+
+ zone.addr = addr;
+ zone.size = size;
+
+ r = ioctl(kvm->vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+ if (r == -1) {
+ perror("kvm_unregister_coalesced_mmio_zone");
+ return -errno;
+ }
+ DPRINTF("Unregistered coalesced mmio region for %llx (%lx)\n", addr, size);
+ return 0;
+ }
+#endif
+ return -ENOSYS;
+}
+
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+int kvm_assign_pci_device(kvm_context_t kvm,
+ struct kvm_assigned_pci_dev *assigned_dev)
+{
+ int ret;
+
+ ret = ioctl(kvm->vm_fd, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+
+static int kvm_old_assign_irq(kvm_context_t kvm,
+ struct kvm_assigned_irq *assigned_irq)
+{
+ int ret;
+
+ ret = ioctl(kvm->vm_fd, KVM_ASSIGN_IRQ, assigned_irq);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+
+#ifdef KVM_CAP_ASSIGN_DEV_IRQ
+int kvm_assign_irq(kvm_context_t kvm,
+ struct kvm_assigned_irq *assigned_irq)
+{
+ int ret;
+
+ ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
+ if (ret > 0) {
+ ret = ioctl(kvm->vm_fd, KVM_ASSIGN_DEV_IRQ, assigned_irq);
+ if (ret < 0)
+ return -errno;
+ return ret;
+ }
+
+ return kvm_old_assign_irq(kvm, assigned_irq);
+}
+
+int kvm_deassign_irq(kvm_context_t kvm,
+ struct kvm_assigned_irq *assigned_irq)
+{
+ int ret;
+
+ ret = ioctl(kvm->vm_fd, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+#else
+int kvm_assign_irq(kvm_context_t kvm,
+ struct kvm_assigned_irq *assigned_irq)
+{
+ return kvm_old_assign_irq(kvm, assigned_irq);
+}
+#endif
+#endif
+
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+int kvm_deassign_pci_device(kvm_context_t kvm,
+ struct kvm_assigned_pci_dev *assigned_dev)
+{
+ int ret;
+
+ ret = ioctl(kvm->vm_fd, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+#endif
+
+int kvm_destroy_memory_region_works(kvm_context_t kvm)
+{
+ int ret = 0;
+
+#ifdef KVM_CAP_DESTROY_MEMORY_REGION_WORKS
+ ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+ KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
+ if (ret <= 0)
+ ret = 0;
+#endif
+ return ret;
+}
+
+int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
+{
+#ifdef KVM_CAP_REINJECT_CONTROL
+ int r;
+ struct kvm_reinject_control control;
+
+ control.pit_reinject = pit_reinject;
+
+ r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
+ if (r > 0) {
+ r = ioctl(kvm->vm_fd, KVM_REINJECT_CONTROL, &control);
+ if (r == -1)
+ return -errno;
+ return r;
+ }
+#endif
+ return -ENOSYS;
+}
+
+int kvm_has_gsi_routing(kvm_context_t kvm)
+{
+ int r = 0;
+
+#ifdef KVM_CAP_IRQ_ROUTING
+ r = kvm_check_extension(kvm, KVM_CAP_IRQ_ROUTING);
+#endif
+ return r;
+}
+
+int kvm_get_gsi_count(kvm_context_t kvm)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ return kvm_check_extension(kvm, KVM_CAP_IRQ_ROUTING);
+#else
+ return -EINVAL;
+#endif
+}
+
+int kvm_clear_gsi_routes(kvm_context_t kvm)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ kvm->irq_routes->nr = 0;
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
+int kvm_add_routing_entry(kvm_context_t kvm,
+ struct kvm_irq_routing_entry* entry)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing *z;
+ struct kvm_irq_routing_entry *new;
+ int n, size;
+
+ if (kvm->irq_routes->nr == kvm->nr_allocated_irq_routes) {
+ n = kvm->nr_allocated_irq_routes * 2;
+ if (n < 64)
+ n = 64;
+ size = sizeof(struct kvm_irq_routing);
+ size += n * sizeof(*new);
+ z = realloc(kvm->irq_routes, size);
+ if (!z)
+ return -ENOMEM;
+ kvm->nr_allocated_irq_routes = n;
+ kvm->irq_routes = z;
+ }
+ n = kvm->irq_routes->nr++;
+ new = &kvm->irq_routes->entries[n];
+ memset(new, 0, sizeof(*new));
+ new->gsi = entry->gsi;
+ new->type = entry->type;
+ new->flags = entry->flags;
+ new->u = entry->u;
+
+ set_gsi(kvm, entry->gsi);
+
+ return 0;
+#else
+ return -ENOSYS;
+#endif
+}
+
+int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing_entry e;
+
+ e.gsi = gsi;
+ e.type = KVM_IRQ_ROUTING_IRQCHIP;
+ e.flags = 0;
+ e.u.irqchip.irqchip = irqchip;
+ e.u.irqchip.pin = pin;
+ return kvm_add_routing_entry(kvm, &e);
+#else
+ return -ENOSYS;
+#endif
+}
+
+int kvm_del_routing_entry(kvm_context_t kvm,
+ struct kvm_irq_routing_entry* entry)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing_entry *e, *p;
+ int i, gsi, found = 0;
+
+ gsi = entry->gsi;
+
+ for (i = 0; i < kvm->irq_routes->nr; ++i) {
+ e = &kvm->irq_routes->entries[i];
+ if (e->type == entry->type
+ && e->gsi == gsi) {
+ switch (e->type)
+ {
+ case KVM_IRQ_ROUTING_IRQCHIP: {
+ if (e->u.irqchip.irqchip ==
+ entry->u.irqchip.irqchip
+ && e->u.irqchip.pin ==
+ entry->u.irqchip.pin) {
+ p = &kvm->irq_routes->
+ entries[--kvm->irq_routes->nr];
+ *e = *p;
+ found = 1;
+ }
+ break;
+ }
+ case KVM_IRQ_ROUTING_MSI: {
+ if (e->u.msi.address_lo ==
+ entry->u.msi.address_lo
+ && e->u.msi.address_hi ==
+ entry->u.msi.address_hi
+ && e->u.msi.data == entry->u.msi.data) {
+ p = &kvm->irq_routes->
+ entries[--kvm->irq_routes->nr];
+ *e = *p;
+ found = 1;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ if (found) {
+ /* If there are no other users of this GSI
+ * mark it available in the bitmap */
+ for (i = 0; i < kvm->irq_routes->nr; i++) {
+ e = &kvm->irq_routes->entries[i];
+ if (e->gsi == gsi)
+ break;
+ }
+ if (i == kvm->irq_routes->nr)
+ clear_gsi(kvm, gsi);
+
+ return 0;
+ }
+ }
+ }
+ return -ESRCH;
+#else
+ return -ENOSYS;
+#endif
+}
+
+int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ struct kvm_irq_routing_entry e;
+
+ e.gsi = gsi;
+ e.type = KVM_IRQ_ROUTING_IRQCHIP;
+ e.flags = 0;
+ e.u.irqchip.irqchip = irqchip;
+ e.u.irqchip.pin = pin;
+ return kvm_del_routing_entry(kvm, &e);
+#else
+ return -ENOSYS;
+#endif
+}
+
+int kvm_commit_irq_routes(kvm_context_t kvm)
+{
+#ifdef KVM_CAP_IRQ_ROUTING
+ int r;
+
+ kvm->irq_routes->flags = 0;
+ r = ioctl(kvm->vm_fd, KVM_SET_GSI_ROUTING, kvm->irq_routes);
+ if (r == -1)
+ r = -errno;
+ return r;
+#else
+ return -ENOSYS;
+#endif
+}
+
+int kvm_get_irq_route_gsi(kvm_context_t kvm)
+{
+ int i, bit;
+ uint32_t *buf = kvm->used_gsi_bitmap;
+
+ /* Return the lowest unused GSI in the bitmap */
+ for (i = 0; i < kvm->max_gsi / 32; i++) {
+ bit = ffs(~buf[i]);
+ if (!bit)
+ continue;
+
+ return bit - 1 + i * 32;
+ }
+
+ return -ENOSPC;
+}
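A sketch of building and committing a routing table with these helpers; the irqchip numbers (0 for the PIC master, 2 for the IOAPIC) follow the usual x86 convention and are an assumption here, not something this file defines.

    #include <errno.h>

    static int route_gsi5(kvm_context_t kvm)
    {
        int r;

        if (!kvm_has_gsi_routing(kvm))
            return -ENOSYS;

        /* Start from an empty temporary table, add both legs of GSI 5,
         * then push the table into the kernel in one go. */
        kvm_clear_gsi_routes(kvm);
        r = kvm_add_irq_route(kvm, 5, 0, 5);     /* PIC master, pin 5 */
        if (!r)
            r = kvm_add_irq_route(kvm, 5, 2, 5); /* IOAPIC, pin 5 */
        if (!r)
            r = kvm_commit_irq_routes(kvm);
        return r;
    }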
+
+#ifdef KVM_CAP_DEVICE_MSIX
+int kvm_assign_set_msix_nr(kvm_context_t kvm,
+ struct kvm_assigned_msix_nr *msix_nr)
+{
+ int ret;
+
+ ret = ioctl(kvm->vm_fd, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+
+int kvm_assign_set_msix_entry(kvm_context_t kvm,
+ struct kvm_assigned_msix_entry *entry)
+{
+ int ret;
+
+ ret = ioctl(kvm->vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+#endif
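A sketch of the two MSI-X calls together, assuming a device id obtained during assignment and a GSI returned by kvm_get_irq_route_gsi(); the structure field names are taken from the kvm_assigned_msix_* definitions in <linux/kvm.h> and should be checked against the headers actually installed.

    static int setup_msix_vector0(kvm_context_t kvm, uint32_t dev_id,
                                  uint32_t gsi)
    {
        struct kvm_assigned_msix_nr nr = {
            .assigned_dev_id = dev_id,
            .entry_nr = 3,              /* assumed: device uses 3 vectors */
        };
        struct kvm_assigned_msix_entry entry = {
            .assigned_dev_id = dev_id,
            .gsi = gsi,
            .entry = 0,                 /* wire MSI-X vector 0 to gsi */
        };
        int r;

        r = kvm_assign_set_msix_nr(kvm, &nr);
        if (r < 0)
            return r;
        return kvm_assign_set_msix_entry(kvm, &entry);
    }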
diff --git a/kvm/libkvm/libkvm.h b/kvm/libkvm/libkvm.h
new file mode 100644
index 000000000..4821a1e4c
--- /dev/null
+++ b/kvm/libkvm/libkvm.h
@@ -0,0 +1,868 @@
+/** \file libkvm.h
+ * libkvm API
+ */
+
+#ifndef LIBKVM_H
+#define LIBKVM_H
+
+#if defined(__s390__)
+#include <asm/ptrace.h>
+#endif
+
+#include <stdint.h>
+
+#ifndef __user
+#define __user /* temporary, until installed via make headers_install */
+#endif
+
+#include <linux/kvm.h>
+
+#include <signal.h>
+
+struct kvm_context;
+
+typedef struct kvm_context *kvm_context_t;
+
+#if defined(__x86_64__) || defined(__i386__)
+struct kvm_msr_list *kvm_get_msr_list(kvm_context_t);
+int kvm_get_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
+int kvm_set_msrs(kvm_context_t, int vcpu, struct kvm_msr_entry *msrs, int n);
+#endif
+
+/*!
+ * \brief KVM callbacks structure
+ *
+ * This structure holds pointers to various functions that KVM will call
+ * when it encounters something that cannot be virtualized, such as
+ * accessing hardware devices via MMIO or regular IO.
+ */
+struct kvm_callbacks {
+ /// For 8bit IO reads from the guest (Usually when executing 'inb')
+ int (*inb)(void *opaque, uint16_t addr, uint8_t *data);
+ /// For 16bit IO reads from the guest (Usually when executing 'inw')
+ int (*inw)(void *opaque, uint16_t addr, uint16_t *data);
+ /// For 32bit IO reads from the guest (Usually when executing 'inl')
+ int (*inl)(void *opaque, uint16_t addr, uint32_t *data);
+ /// For 8bit IO writes from the guest (Usually when executing 'outb')
+ int (*outb)(void *opaque, uint16_t addr, uint8_t data);
+ /// For 16bit IO writes from the guest (Usually when executing 'outw')
+ int (*outw)(void *opaque, uint16_t addr, uint16_t data);
+ /// For 32bit IO writes from the guest (Usually when executing 'outl')
+ int (*outl)(void *opaque, uint16_t addr, uint32_t data);
+ /// generic memory reads to unmapped memory (For MMIO devices)
+ int (*mmio_read)(void *opaque, uint64_t addr, uint8_t *data,
+ int len);
+ /// generic memory writes to unmapped memory (For MMIO devices)
+ int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
+ int len);
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+ int (*debug)(void *opaque, void *env,
+ struct kvm_debug_exit_arch *arch_info);
+#endif
+ /*!
+ * \brief Called when the VCPU issues an 'hlt' instruction.
+ *
+ * Typically, you should yield here to prevent 100% CPU utilization
+ * on the host CPU.
+ */
+ int (*halt)(void *opaque, int vcpu);
+ int (*shutdown)(void *opaque, void *env);
+ int (*io_window)(void *opaque);
+ int (*try_push_interrupts)(void *opaque);
+#ifdef KVM_CAP_USER_NMI
+ void (*push_nmi)(void *opaque);
+#endif
+ void (*post_kvm_run)(void *opaque, void *env);
+ int (*pre_kvm_run)(void *opaque, void *env);
+ int (*tpr_access)(void *opaque, int vcpu, uint64_t rip, int is_write);
+#if defined(__powerpc__)
+ int (*powerpc_dcr_read)(int vcpu, uint32_t dcrn, uint32_t *data);
+ int (*powerpc_dcr_write)(int vcpu, uint32_t dcrn, uint32_t data);
+#endif
+#if defined(__s390__)
+ int (*s390_handle_intercept)(kvm_context_t context, int vcpu,
+ struct kvm_run *run);
+ int (*s390_handle_reset)(kvm_context_t context, int vcpu,
+ struct kvm_run *run);
+#endif
+};
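Illustrative only: the smallest useful callback table wires port I/O and MMIO to stub handlers (none of these my_* functions exist in this patch). Entries left NULL here (inw, outl, shutdown, pre/post_kvm_run, ...) must be filled in by any real user, since the run loop calls them unconditionally.

    #include <string.h>

    /* Stub handlers: reads return all-ones, writes are ignored. */
    static int my_inb(void *opaque, uint16_t addr, uint8_t *data)
    {
        *data = 0xff;
        return 0;
    }
    static int my_outb(void *opaque, uint16_t addr, uint8_t data)
    {
        return 0;
    }
    static int my_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
    {
        memset(data, 0xff, len);
        return 0;
    }
    static int my_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
    {
        return 0;
    }
    static int my_halt(void *opaque, int vcpu)
    {
        return 0;
    }
    static int my_io_window(void *opaque)
    {
        return 0;
    }

    static struct kvm_callbacks my_callbacks = {
        .inb        = my_inb,
        .outb       = my_outb,
        .mmio_read  = my_mmio_read,
        .mmio_write = my_mmio_write,
        .halt       = my_halt,
        .io_window  = my_io_window,
    };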
+
+/*!
+ * \brief Create new KVM context
+ *
+ * This creates a new kvm_context. A KVM context is a small area of data that
+ * holds information about the KVM instance that gets created by this call.\n
+ * This should always be your first call to KVM.
+ *
+ * \param callbacks Pointer to a valid kvm_callbacks structure
+ * \param opaque Passed back as the first argument to every callback
+ * \return NULL on failure
+ */
+kvm_context_t kvm_init(struct kvm_callbacks *callbacks,
+ void *opaque);
+
+/*!
+ * \brief Cleanup the KVM context
+ *
+ * Should always be called when closing down KVM.\n
+ * Exception: If kvm_init() fails, this function should not be called, as the
+ * context would be invalid
+ *
+ * \param kvm Pointer to the kvm_context that is to be freed
+ */
+void kvm_finalize(kvm_context_t kvm);
+
+/*!
+ * \brief Disable the in-kernel IRQCHIP creation
+ *
+ * In-kernel irqchip is enabled by default. If userspace irqchip is to be used,
+ * this should be called prior to kvm_create().
+ *
+ * \param kvm Pointer to the kvm_context
+ */
+void kvm_disable_irqchip_creation(kvm_context_t kvm);
+
+/*!
+ * \brief Disable the in-kernel PIT creation
+ *
+ * In-kernel pit is enabled by default. If userspace pit is to be used,
+ * this should be called prior to kvm_create().
+ *
+ * \param kvm Pointer to the kvm_context
+ */
+void kvm_disable_pit_creation(kvm_context_t kvm);
+
+/*!
+ * \brief Create new virtual machine
+ *
+ * This creates a new virtual machine, maps physical RAM to it, and creates a
+ * virtual CPU for it.\n
+ * \n
+ * Memory gets mapped for addresses 0->0xA0000, 0xC0000->phys_mem_bytes
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param phys_mem_bytes The amount of physical ram you want the VM to have
+ * \param phys_mem This pointer will be set to point to the memory that
+ * kvm_create allocates for physical RAM
+ * \return 0 on success
+ */
+int kvm_create(kvm_context_t kvm,
+ unsigned long phys_mem_bytes,
+ void **phys_mem);
+int kvm_create_vm(kvm_context_t kvm);
+int kvm_check_extension(kvm_context_t kvm, int ext);
+void kvm_create_irqchip(kvm_context_t kvm);
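Putting the calls above together, a minimal bring-up sketch (128MiB of RAM is an arbitrary choice; my_callbacks is the stub table sketched after struct kvm_callbacks):

    static kvm_context_t start_vm(void)
    {
        kvm_context_t kvm;
        void *ram;

        kvm = kvm_init(&my_callbacks, NULL);
        if (!kvm)
            return NULL;

        /* Creates the VM, maps 128MiB of guest RAM and vcpu 0; the
         * in-kernel irqchip and PIT are created by default unless the
         * kvm_disable_*_creation() calls were made first. */
        if (kvm_create(kvm, 128UL << 20, &ram) < 0) {
            kvm_finalize(kvm);
            return NULL;
        }
        return kvm;
    }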
+
+/*!
+ * \brief Create a new virtual cpu
+ *
+ * This creates a new virtual cpu (the first vcpu is created by kvm_create()).
+ * Should be called from a thread dedicated to the vcpu.
+ *
+ * \param kvm kvm context
+ * \param slot vcpu number (> 0)
+ * \return 0 on success, -errno on failure
+ */
+int kvm_create_vcpu(kvm_context_t kvm, int slot);
+
+/*!
+ * \brief Start the VCPU
+ *
+ * This starts the VCPU and virtualization is started.\n
+ * \n
+ * This function will not return until any of these conditions are met:
+ * - An IO/MMIO handler does not return "0"
+ * - An exception that neither the guest OS, nor KVM can handle occurs
+ *
+ * \note This function will call the callbacks registered in kvm_init()
+ * to emulate those functions
+ * \note If you at any point want to interrupt the VCPU, send its thread a
+ * signal: the KVM_RUN ioctl then fails with EINTR and control returns to
+ * userspace. This allows you to simulate external interrupts and
+ * asynchronous I/O.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be started
+ * \return 0 on success, but you really shouldn't expect this function to
+ * return except when an error has occurred, or when you have interrupted
+ * it with a signal.
+ */
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
+
+/*!
+ * \brief Get the interrupt flag from the last exit to userspace
+ *
+ * This gets the CPU interrupt flag as it was on the last exit to userspace.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \return interrupt flag value (0 or 1)
+ */
+int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu);
+
+/*!
+ * \brief Get the value of the APIC_BASE msr as of last exit to userspace
+ *
+ * This gets the APIC_BASE msr as it was on the last exit to userspace.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \return APIC_BASE msr contents
+ */
+uint64_t kvm_get_apic_base(kvm_context_t kvm, int vcpu);
+
+/*!
+ * \brief Check if a vcpu is ready for interrupt injection
+ *
+ * This checks if vcpu interrupts are not masked by mov ss or sti.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \return boolean indicating interrupt injection readiness
+ */
+int kvm_is_ready_for_interrupt_injection(kvm_context_t kvm, int vcpu);
+
+/*!
+ * \brief Read VCPU registers
+ *
+ * This gets the GP registers from the VCPU and outputs them
+ * into a kvm_regs structure
+ *
+ * \note This function returns a \b copy of the VCPUs registers.\n
+ * If you wish to modify the VCPUs GP registers, you should call kvm_set_regs()
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \param regs Pointer to a kvm_regs which will be populated with the VCPUs
+ * registers values
+ * \return 0 on success
+ */
+int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
+
+/*!
+ * \brief Write VCPU registers
+ *
+ * This sets the GP registers on the VCPU from a kvm_regs structure
+ *
+ * \note When this function returns, the regs pointer and the data it points to
+ * can be discarded
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be written
+ * \param regs Pointer to a kvm_regs holding the new register values for
+ * the VCPU
+ * \return 0 on success
+ */
+int kvm_set_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs);
+/*!
+ * \brief Read VCPU fpu registers
+ *
+ * This gets the FPU registers from the VCPU and outputs them
+ * into a kvm_fpu structure
+ *
+ * \note This function returns a \b copy of the VCPUs registers.\n
+ * If you wish to modify the VCPU FPU registers, you should call kvm_set_fpu()
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \param fpu Pointer to a kvm_fpu which will be populated with the VCPUs
+ * fpu registers values
+ * \return 0 on success
+ */
+int kvm_get_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
+
+/*!
+ * \brief Write VCPU fpu registers
+ *
+ * This sets the FPU registers on the VCPU from a kvm_fpu structure
+ *
+ * \note When this function returns, the fpu pointer and the data it points to
+ * can be discarded
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be written
+ * \param fpu Pointer to a kvm_fpu which holds the new vcpu fpu state
+ * \return 0 on success
+ */
+int kvm_set_fpu(kvm_context_t kvm, int vcpu, struct kvm_fpu *fpu);
+
+/*!
+ * \brief Read VCPU system registers
+ *
+ * This gets the non-GP registers from the VCPU and outputs them
+ * into a kvm_sregs structure
+ *
+ * \note This function returns a \b copy of the VCPUs registers.\n
+ * If you wish to modify the VCPUs non-GP registers, you should call
+ * kvm_set_sregs()
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \param regs Pointer to a kvm_sregs which will be populated with the VCPUs
+ * registers values
+ * \return 0 on success
+ */
+int kvm_get_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
+
+/*!
+ * \brief Write VCPU system registers
+ *
+ * This sets the non-GP registers on the VCPU from a kvm_sregs structure
+ *
+ * \note When this function returns, the regs pointer and the data it points to
+ * can be discarded
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be written
+ * \param regs Pointer to a kvm_sregs holding the new register values for
+ * the VCPU
+ * \return 0 on success
+ */
+int kvm_set_sregs(kvm_context_t kvm, int vcpu, struct kvm_sregs *regs);
+
+#ifdef KVM_CAP_MP_STATE
+/*!
+ * \brief Read VCPU MP state
+ *
+ */
+int kvm_get_mpstate(kvm_context_t kvm, int vcpu,
+ struct kvm_mp_state *mp_state);
+
+/*!
+ * \brief Write VCPU MP state
+ *
+ */
+int kvm_set_mpstate(kvm_context_t kvm, int vcpu,
+ struct kvm_mp_state *mp_state);
+/*!
+ * \brief Reset VCPU MP state
+ *
+ */
+static inline int kvm_reset_mpstate(kvm_context_t kvm, int vcpu)
+{
+ struct kvm_mp_state mp_state = {.mp_state = KVM_MP_STATE_UNINITIALIZED};
+ return kvm_set_mpstate(kvm, vcpu, &mp_state);
+}
+#endif
+
+/*!
+ * \brief Simulate an external vectored interrupt
+ *
+ * This allows you to simulate an external vectored interrupt.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should receive the interrupt
+ * \param irq Vector number
+ * \return 0 on success
+ */
+int kvm_inject_irq(kvm_context_t kvm, int vcpu, unsigned irq);
+
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+int kvm_set_guest_debug(kvm_context_t, int vcpu, struct kvm_guest_debug *dbg);
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+/*!
+ * \brief Setup a vcpu's cpuid instruction emulation
+ *
+ * Set up a table of cpuid function to cpuid outputs.\n
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be initialized
+ * \param nent number of entries to be installed
+ * \param entries cpuid function entries table
+ * \return 0 on success, or -errno on error
+ */
+int kvm_setup_cpuid(kvm_context_t kvm, int vcpu, int nent,
+ struct kvm_cpuid_entry *entries);
+
+/*!
+ * \brief Setup a vcpu's cpuid instruction emulation
+ *
+ * Set up a table of cpuid function to cpuid outputs.
+ * This call replaces the older kvm_setup_cpuid() interface; its entries
+ * carry extra fields to support cpuid functions that have sub-leaf values.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be initialized
+ * \param nent number of entries to be installed
+ * \param entries cpuid function entries table
+ * \return 0 on success, or -errno on error
+ */
+int kvm_setup_cpuid2(kvm_context_t kvm, int vcpu, int nent,
+ struct kvm_cpuid_entry2 *entries);
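A sketch of filling a tiny table for kvm_setup_cpuid2(); the leaf values are made up and a real caller would derive them from the host cpuid, filtered for the guest.

    #include <string.h>

    static int install_min_cpuid(kvm_context_t kvm, int vcpu)
    {
        struct kvm_cpuid_entry2 ent[2];

        memset(ent, 0, sizeof(ent));
        ent[0].function = 0;
        ent[0].eax = 1;            /* highest standard leaf we report */
        ent[1].function = 1;
        ent[1].edx = 1 << 4;       /* e.g. advertise only the TSC bit */

        return kvm_setup_cpuid2(kvm, vcpu, 2, ent);
    }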
+
+/*!
+ * \brief Set the number of shadow pages to be allocated to the vm
+ *
+ * \param kvm pointer to kvm_context
+ * \param nrshadow_pages number of pages to be allocated
+ */
+int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages);
+
+/*!
+ * \brief Get the number of shadow pages that are allocated to the vm
+ *
+ * \param kvm pointer to kvm_context
+ * \param nrshadow_pages pointer that receives the number of allocated pages
+ */
+int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages);
+
+/*!
+ * \brief Set up cr8 for next time the vcpu is executed
+ *
+ * This is a fast setter for cr8, which will be applied when the
+ * vcpu next enters guest mode.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be written
+ * \param cr8 next cr8 value
+ */
+void kvm_set_cr8(kvm_context_t kvm, int vcpu, uint64_t cr8);
+
+/*!
+ * \brief Get cr8 for syncing the tpr in qemu apic emulation
+ *
+ * This is a getter for cr8, which is used to sync with the tpr in qemu's
+ * apic emulation.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ */
+__u64 kvm_get_cr8(kvm_context_t kvm, int vcpu);
+#endif
+
+/*!
+ * \brief Set a vcpu's signal mask for guest mode
+ *
+ * A vcpu can have different signals blocked in guest mode and user mode.
+ * This allows guest execution to be interrupted on a signal, without requiring
+ * that the signal be delivered to a signal handler (the signal can be
+ * dequeued using sigwait(2)).
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be initialized
+ * \param sigset signal mask for guest mode
+ * \return 0 on success, or -errno on error
+ */
+int kvm_set_signal_mask(kvm_context_t kvm, int vcpu, const sigset_t *sigset);
+
+/*!
+ * \brief Dump all VCPU information
+ *
+ * This dumps \b all the information that KVM has about a virtual CPU, namely:
+ * - GP Registers
+ * - System registers (selectors, descriptors, etc)
+ * - VMCS Data
+ * - MSRS
+ * - Pending interrupts
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \return 0 on success
+ */
+int kvm_dump_vcpu(kvm_context_t kvm, int vcpu);
+
+/*!
+ * \brief Dump VCPU registers
+ *
+ * This dumps some of the information that KVM has about a virtual CPU, namely:
+ * - GP Registers
+ *
+ * A much more verbose version of this is available as kvm_dump_vcpu()
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should get dumped
+ * \return 0 on success
+ */
+void kvm_show_regs(kvm_context_t kvm, int vcpu);
+
+
+void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start,
+ unsigned long len, int log, int writable);
+void kvm_destroy_phys_mem(kvm_context_t, unsigned long phys_start,
+ unsigned long len);
+void kvm_unregister_memory_area(kvm_context_t, uint64_t phys_start,
+ unsigned long len);
+
+int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_start, unsigned long size);
+int kvm_register_phys_mem(kvm_context_t kvm,
+ unsigned long phys_start, void *userspace_addr,
+ unsigned long len, int log);
+int kvm_get_dirty_pages(kvm_context_t, unsigned long phys_addr, void *buf);
+int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
+ unsigned long end_addr, void *buf, void*opaque,
+ int (*cb)(unsigned long start, unsigned long len,
+ void*bitmap, void *opaque));
+int kvm_register_coalesced_mmio(kvm_context_t kvm,
+ uint64_t addr, uint32_t size);
+int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
+ uint64_t addr, uint32_t size);
+
+/*!
+ * \brief Create a memory alias
+ *
+ * Aliases a portion of physical memory to another portion. If the guest
+ * accesses the alias region, it will behave exactly as if it accessed
+ * the target memory.
+ */
+int kvm_create_memory_alias(kvm_context_t,
+ uint64_t phys_start, uint64_t len,
+ uint64_t target_phys);
+
+/*!
+ * \brief Destroy a memory alias
+ *
+ * Removes an alias created with kvm_create_memory_alias().
+ */
+int kvm_destroy_memory_alias(kvm_context_t, uint64_t phys_start);
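A one-call sketch, with made-up addresses: alias the 128KiB legacy VGA window onto a backing region placed elsewhere in guest physical memory.

    static int alias_vga_window(kvm_context_t kvm)
    {
        /* Accesses to 0xa0000..0xbffff behave as accesses to the
         * (assumed) backing region at 0x1000000. */
        return kvm_create_memory_alias(kvm, 0xa0000, 0x20000, 0x1000000);
    }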
+
+/*!
+ * \brief Get a bitmap of guest ram pages which are allocated to the guest.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param phys_addr Memory slot phys addr
+ * \param bitmap Long aligned address of a big enough bitmap (one bit per page)
+ */
+int kvm_get_mem_map(kvm_context_t kvm, unsigned long phys_addr, void *bitmap);
+int kvm_get_mem_map_range(kvm_context_t kvm, unsigned long phys_addr,
+ unsigned long len, void *buf, void *opaque,
+ int (*cb)(unsigned long start,unsigned long len,
+ void* bitmap, void* opaque));
+int kvm_set_irq_level(kvm_context_t kvm, int irq, int level, int *status);
+
+int kvm_dirty_pages_log_enable_slot(kvm_context_t kvm,
+ uint64_t phys_start,
+ uint64_t len);
+int kvm_dirty_pages_log_disable_slot(kvm_context_t kvm,
+ uint64_t phys_start,
+ uint64_t len);
+/*!
+ * \brief Enable dirty-pages-logging for all memory regions
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_dirty_pages_log_enable_all(kvm_context_t kvm);
+
+/*!
+ * \brief Disable dirty-page-logging for some memory regions
+ *
+ * Disable dirty-pages-logging for those memory regions that were
+ * created with dirty-page-logging disabled.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_dirty_pages_log_reset(kvm_context_t kvm);
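A sketch of one dirty-tracking cycle built on the calls above; the caller is assumed to pass a bitmap large enough for the slot that starts at slot_base (one bit per page).

    static int fetch_dirty_bitmap(kvm_context_t kvm, unsigned long slot_base,
                                  void *bitmap)
    {
        int r;

        r = kvm_dirty_pages_log_enable_all(kvm);
        if (r < 0)
            return r;
        /* Fills "bitmap" with the dirty bits of the slot at slot_base. */
        return kvm_get_dirty_pages(kvm, slot_base, bitmap);
    }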
+
+/*!
+ * \brief Query whether the in-kernel irqchip is used
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_irqchip_in_kernel(kvm_context_t kvm);
+
+int kvm_has_sync_mmu(kvm_context_t kvm);
+
+#ifdef KVM_CAP_IRQCHIP
+/*!
+ * \brief Dump in kernel IRQCHIP contents
+ *
+ * Dump one of the in kernel irq chip devices, including PIC (master/slave)
+ * and IOAPIC into a kvm_irqchip structure
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param chip The irq chip device to be dumped
+ */
+int kvm_get_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
+
+/*!
+ * \brief Set in kernel IRQCHIP contents
+ *
+ * Write one of the in kernel irq chip devices, including PIC (master/slave)
+ * and IOAPIC
+ *
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param chip The irq chip device to be written
+ */
+int kvm_set_irqchip(kvm_context_t kvm, struct kvm_irqchip *chip);
+
+#if defined(__i386__) || defined(__x86_64__)
+/*!
+ * \brief Get in kernel local APIC for vcpu
+ *
+ * Save the local apic state including the timer of a virtual CPU
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be accessed
+ * \param s Local apic state of the specific virtual CPU
+ */
+int kvm_get_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
+
+/*!
+ * \brief Set in kernel local APIC for vcpu
+ *
+ * Restore the local apic state including the timer of a virtual CPU
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should be accessed
+ * \param s Local apic state of the specific virtual CPU
+ */
+int kvm_set_lapic(kvm_context_t kvm, int vcpu, struct kvm_lapic_state *s);
+
+#endif
+
+/*!
+ * \brief Simulate an NMI
+ *
+ * This allows you to simulate a non-maskable interrupt.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu Which virtual CPU should receive the NMI
+ * \return 0 on success
+ */
+int kvm_inject_nmi(kvm_context_t kvm, int vcpu);
+
+#endif
+
+/*!
+ * \brief Query whether the in-kernel pit is used
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_pit_in_kernel(kvm_context_t kvm);
+
+/*!
+ * \brief Initialize coalesced MMIO
+ *
+ * Check for coalesced MMIO capability and store in context
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_init_coalesced_mmio(kvm_context_t kvm);
+
+#ifdef KVM_CAP_PIT
+
+#if defined(__i386__) || defined(__x86_64__)
+/*!
+ * \brief Get in kernel PIT of the virtual domain
+ *
+ * Save the PIT state.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param s PIT state of the virtual domain
+ */
+int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s);
+
+/*!
+ * \brief Set in kernel PIT of the virtual domain
+ *
+ * Restore the PIT state.
+ * The timer will be retriggered after the state is restored.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param s PIT state of the virtual domain
+ */
+int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s);
+#endif
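A sketch of a save/load round-trip for the in-kernel PIT; a real implementation would serialize the kvm_pit_state in between the two calls.

    static int save_restore_pit(kvm_context_t kvm)
    {
        struct kvm_pit_state pit;
        int r;

        r = kvm_get_pit(kvm, &pit);
        if (r < 0)
            return r;
        return kvm_set_pit(kvm, &pit);
    }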
+
+int kvm_reinject_control(kvm_context_t kvm, int pit_reinject);
+
+#endif
+
+#ifdef KVM_CAP_VAPIC
+
+/*!
+ * \brief Enable kernel tpr access reporting
+ *
+ * When tpr access reporting is enabled, the kernel will call the
+ * ->tpr_access() callback every time the guest vcpu accesses the tpr.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu vcpu to enable tpr access reporting on
+ */
+int kvm_enable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+
+/*!
+ * \brief Disable kernel tpr access reporting
+ *
+ * Undoes the effect of kvm_enable_tpr_access_reporting().
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param vcpu vcpu to disable tpr access reporting on
+ */
+int kvm_disable_tpr_access_reporting(kvm_context_t kvm, int vcpu);
+
+int kvm_enable_vapic(kvm_context_t kvm, int vcpu, uint64_t vapic);
+
+#endif
+
+#if defined(__s390__)
+int kvm_s390_initial_reset(kvm_context_t kvm, int slot);
+int kvm_s390_interrupt(kvm_context_t kvm, int slot,
+ struct kvm_s390_interrupt *kvmint);
+int kvm_s390_set_initial_psw(kvm_context_t kvm, int slot, psw_t psw);
+int kvm_s390_store_status(kvm_context_t kvm, int slot, unsigned long addr);
+#endif
+
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+/*!
+ * \brief Notifies host kernel about a PCI device to be assigned to a guest
+ *
+ * Used for PCI device assignment, this function notifies the host
+ * kernel about the assigning of the physical PCI device to a guest.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param assigned_dev Parameters, like bus, devfn number, etc
+ */
+int kvm_assign_pci_device(kvm_context_t kvm,
+ struct kvm_assigned_pci_dev *assigned_dev);
+
+/*!
+ * \brief Assign IRQ for an assigned device
+ *
+ * Used for PCI device assignment, this function assigns IRQ numbers for
+ * a physical device and guest IRQ handling.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
+ */
+int kvm_assign_irq(kvm_context_t kvm,
+ struct kvm_assigned_irq *assigned_irq);
+
+#ifdef KVM_CAP_ASSIGN_DEV_IRQ
+/*!
+ * \brief Deassign IRQ for an assigned device
+ *
+ * Used for PCI device assignment, this function deassigns IRQ numbers
+ * for an assigned device.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param assigned_irq Parameters, like dev id, host irq, guest irq, etc
+ */
+int kvm_deassign_irq(kvm_context_t kvm,
+ struct kvm_assigned_irq *assigned_irq);
+#endif
+#endif
+
+/*!
+ * \brief Determines whether destroying memory regions is allowed
+ *
+ * KVM before 2.6.29 had a bug when destroying memory regions.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_destroy_memory_region_works(kvm_context_t kvm);
+
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+/*!
+ * \brief Notifies host kernel about a PCI device to be deassigned from a guest
+ *
+ * Used for hot remove PCI device, this function notifies the host
+ * kernel about the deassigning of the physical PCI device from a guest.
+ *
+ * \param kvm Pointer to the current kvm_context
+ * \param assigned_dev Parameters, like bus, devfn number, etc
+ */
+int kvm_deassign_pci_device(kvm_context_t kvm,
+ struct kvm_assigned_pci_dev *assigned_dev);
+#endif
+
+/*!
+ * \brief Checks whether the generic irq routing capability is present
+ *
+ * Checks whether kvm can reroute interrupts among the various interrupt
+ * controllers.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_has_gsi_routing(kvm_context_t kvm);
+
+/*!
+ * \brief Determines the number of gsis that can be routed
+ *
+ * Returns the number of distinct gsis that can be routed by kvm. This is
+ * also the number of distinct routes (if a gsi has two routes, then another
+ * gsi cannot be used...)
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_get_gsi_count(kvm_context_t kvm);
+
+/*!
+ * \brief Clears the temporary irq routing table
+ *
+ * Clears the temporary irq routing table. Nothing is committed to the
+ * running VM.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_clear_gsi_routes(kvm_context_t kvm);
+
+/*!
+ * \brief Adds an irq route to the temporary irq routing table
+ *
+ * Adds an irq route to the temporary irq routing table. Nothing is
+ * committed to the running VM.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_add_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
+
+/*!
+ * \brief Removes an irq route from the temporary irq routing table
+ *
+ * Removes an irq route from the temporary irq routing table. Nothing is
+ * committed to the running VM.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_del_irq_route(kvm_context_t kvm, int gsi, int irqchip, int pin);
+
+struct kvm_irq_routing_entry;
+/*!
+ * \brief Adds a routing entry to the temporary irq routing table
+ *
+ * Adds a filled routing entry to the temporary irq routing table. Nothing is
+ * committed to the running VM.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_add_routing_entry(kvm_context_t kvm,
+ struct kvm_irq_routing_entry* entry);
+
+/*!
+ * \brief Removes a routing entry from the temporary irq routing table
+ *
+ * Removes a routing entry from the temporary irq routing table. Nothing is
+ * committed to the running VM.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_del_routing_entry(kvm_context_t kvm,
+ struct kvm_irq_routing_entry* entry);
+
+/*!
+ * \brief Commit the temporary irq routing table
+ *
+ * Commit the temporary irq routing table to the running VM.
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int kvm_commit_irq_routes(kvm_context_t kvm);
+
+/*!
+ * \brief Get unused GSI number for irq routing table
+ *
+ * Get unused GSI number for irq routing table
+ *
+ * \param kvm Pointer to the current kvm_context
+ */
+int k