author     Avi Kivity <avi@redhat.com>  2008-12-01 18:51:57 +0200
committer  Avi Kivity <avi@redhat.com>  2008-12-01 18:51:57 +0200
commit     13856f17ce790737b928961b7fb5a3adddc95124 (patch)
tree       8e41ccd64029c5e90e1493b76cd8b5cecacb0884
parent     Device-assignment: init_assigned_device() error handling (diff)
parent     Spelling and grammar fixes (diff)
Merge branch 'qemu-cvs'
Conflicts:
    qemu/Makefile
    qemu/block.c
    qemu/configure
    qemu/exec.c
    qemu/hw/cirrus_vga.c
    qemu/hw/ide.c
    qemu/hw/vga.c
    qemu/vl.c

Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  Makefile  18
-rw-r--r--  block.c  108
-rw-r--r--  bsd-user/bsd-mman.h  121
-rw-r--r--  bsd-user/mmap.c  9
-rwxr-xr-x  configure  20
-rw-r--r--  console.c  74
-rw-r--r--  console.h  25
-rw-r--r--  cpu-all.h  2
-rw-r--r--  cpu-defs.h  11
-rw-r--r--  cpu-exec.c  2
-rw-r--r--  exec-all.h  1
-rw-r--r--  exec.c  90
-rw-r--r--  gdbstub.c  4
-rw-r--r--  hw/blizzard.c  18
-rw-r--r--  hw/cirrus_vga.c  159
-rw-r--r--  hw/esp.c  53
-rw-r--r--  hw/g364fb.c  12
-rw-r--r--  hw/g364fb_template.h  4
-rw-r--r--  hw/ide.c  8
-rw-r--r--  hw/jazz_led.c  28
-rw-r--r--  hw/musicpal.c  6
-rw-r--r--  hw/omap_lcdc.c  28
-rw-r--r--  hw/pc.h  4
-rw-r--r--  hw/pl110.c  6
-rw-r--r--  hw/pxa2xx_lcd.c  8
-rw-r--r--  hw/ssd0303.c  4
-rw-r--r--  hw/ssd0323.c  6
-rw-r--r--  hw/sun4m.c  2
-rw-r--r--  hw/tc6393xb.c  12
-rw-r--r--  hw/tc6393xb_template.h  4
-rw-r--r--  hw/tcx.c  16
-rw-r--r--  hw/vga.c  115
-rw-r--r--  hw/vga_int.h  16
-rw-r--r--  hw/vmware_vga.c  24
-rw-r--r--  kvm-all.c  122
-rw-r--r--  kvm.h  4
-rw-r--r--  linux-user/signal.c  9
-rw-r--r--  m68k-dis.c  2
-rw-r--r--  qemu-char.c  1
-rw-r--r--  qemu-doc.texi  20
-rw-r--r--  qemu-kvm.c  47
-rw-r--r--  qemu-kvm.h  5
-rw-r--r--  qemu-tool.c  2
-rw-r--r--  sys-queue.h  5
-rw-r--r--  target-alpha/cpu.h  1
-rw-r--r--  target-alpha/translate.c  4
-rw-r--r--  target-arm/exec.h  2
-rw-r--r--  target-arm/translate.c  4
-rw-r--r--  target-cris/exec.h  2
-rw-r--r--  target-cris/translate.c  4
-rw-r--r--  target-i386/exec.h  4
-rw-r--r--  target-i386/helper.c  2
-rw-r--r--  target-i386/translate.c  4
-rw-r--r--  target-m68k/exec.h  2
-rw-r--r--  target-m68k/translate.c  6
-rw-r--r--  target-mips/exec.h  1
-rw-r--r--  target-mips/translate.c  4
-rw-r--r--  target-ppc/cpu.h  11
-rw-r--r--  target-ppc/exec.h  17
-rw-r--r--  target-ppc/helper.h  41
-rw-r--r--  target-ppc/op.c  182
-rw-r--r--  target-ppc/op_helper.c  413
-rw-r--r--  target-ppc/op_helper.h  55
-rw-r--r--  target-ppc/op_helper_mem.h  356
-rw-r--r--  target-ppc/op_mem.h  1096
-rw-r--r--  target-ppc/translate.c  1795
-rw-r--r--  target-sh4/exec.h  2
-rw-r--r--  target-sh4/translate.c  4
-rw-r--r--  target-sparc/exec.h  3
-rw-r--r--  target-sparc/translate.c  4
-rw-r--r--  tcg/tcg.c  3
-rw-r--r--  usb-stub.c  2
-rw-r--r--  vl.c  19
-rw-r--r--  vnc.c  66
-rw-r--r--  vnchextile.h  10
75 files changed, 2091 insertions, 3263 deletions
diff --git a/Makefile b/Makefile
index 4efaa5153..59b4cf9ff 100644
--- a/Makefile
+++ b/Makefile
@@ -225,6 +225,16 @@ KEYMAPS=da en-gb et fr fr-ch is lt modifiers no pt-br sv \
ar de en-us fi fr-be hr it lv nl pl ru th \
common de-ch es fo fr-ca hu ja mk nl-be pt sl tr
+ifdef INSTALL_BLOBS
+BLOBS=bios.bin vgabios.bin vgabios-cirrus.bin ppc_rom.bin \
+video.x openbios-sparc32 openbios-sparc64 pxe-ne2k_pci.bin \
+pxe-rtl8139.bin pxe-pcnet.bin pxe-e1000.bin
+BLOBS += extboot.bin
+BLOBS += bamboo.dtb
+else
+BLOBS=
+endif
+
install-doc: $(DOCS)
mkdir -p "$(DESTDIR)$(docdir)"
$(INSTALL) -m 644 qemu-doc.html qemu-tech.html "$(DESTDIR)$(docdir)"
@@ -240,15 +250,13 @@ install: all $(if $(BUILD_DOCS),install-doc)
ifneq ($(TOOLS),)
$(INSTALL) -m 755 -s $(TOOLS) "$(DESTDIR)$(bindir)"
endif
+ifneq ($(BLOBS),)
mkdir -p "$(DESTDIR)$(datadir)"
- set -e; for x in bios.bin vgabios.bin vgabios-cirrus.bin ppc_rom.bin \
- video.x openbios-sparc32 openbios-sparc64 pxe-ne2k_pci.bin \
- pxe-rtl8139.bin pxe-pcnet.bin pxe-e1000.bin extboot.bin \
- bamboo.dtb; \
- do \
+ set -e; for x in $(BLOBS); do \
[ -f $(SRC_PATH)/pc-bios/$$x ] && \
$(INSTALL) -m 644 $(SRC_PATH)/pc-bios/$$x "$(DESTDIR)$(datadir)"; \
done
+endif
ifndef CONFIG_WIN32
mkdir -p "$(DESTDIR)$(datadir)/keymaps"
set -e; for x in $(KEYMAPS); do \
diff --git a/block.c b/block.c
index d9c0af5e5..9f374bd22 100644
--- a/block.c
+++ b/block.c
@@ -741,16 +741,16 @@ void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
}
struct partition {
- uint8_t boot_ind; /* 0x80 - active */
- uint8_t head; /* starting head */
- uint8_t sector; /* starting sector */
- uint8_t cyl; /* starting cylinder */
- uint8_t sys_ind; /* What partition type */
- uint8_t end_head; /* end head */
- uint8_t end_sector; /* end sector */
- uint8_t end_cyl; /* end cylinder */
- uint32_t start_sect; /* starting sector counting from 0 */
- uint32_t nr_sects; /* nr of sectors in partition */
+ uint8_t boot_ind; /* 0x80 - active */
+ uint8_t head; /* starting head */
+ uint8_t sector; /* starting sector */
+ uint8_t cyl; /* starting cylinder */
+ uint8_t sys_ind; /* What partition type */
+ uint8_t end_head; /* end head */
+ uint8_t end_sector; /* end sector */
+ uint8_t end_cyl; /* end cylinder */
+ uint32_t start_sect; /* starting sector counting from 0 */
+ uint32_t nr_sects; /* nr of sectors in partition */
} __attribute__((packed));
/* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */
@@ -816,51 +816,51 @@ void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *pse
bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
translation = bdrv_get_translation_hint(bs);
if (cylinders != 0) {
- *pcyls = cylinders;
- *pheads = heads;
- *psecs = secs;
+ *pcyls = cylinders;
+ *pheads = heads;
+ *psecs = secs;
} else {
- if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
- if (heads > 16) {
- /* if heads > 16, it means that a BIOS LBA
- translation was active, so the default
- hardware geometry is OK */
- lba_detected = 1;
- goto default_geometry;
- } else {
- *pcyls = cylinders;
- *pheads = heads;
- *psecs = secs;
- /* disable any translation to be in sync with
- the logical geometry */
- if (translation == BIOS_ATA_TRANSLATION_AUTO) {
- bdrv_set_translation_hint(bs,
- BIOS_ATA_TRANSLATION_NONE);
- }
- }
- } else {
- default_geometry:
- /* if no geometry, use a standard physical disk geometry */
- cylinders = nb_sectors / (16 * 63);
-
- if (cylinders > 16383)
- cylinders = 16383;
- else if (cylinders < 2)
- cylinders = 2;
- *pcyls = cylinders;
- *pheads = 16;
- *psecs = 63;
- if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
- if ((*pcyls * *pheads) <= 131072) {
- bdrv_set_translation_hint(bs,
- BIOS_ATA_TRANSLATION_LARGE);
- } else {
- bdrv_set_translation_hint(bs,
- BIOS_ATA_TRANSLATION_LBA);
- }
- }
- }
- bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
+ if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
+ if (heads > 16) {
+ /* if heads > 16, it means that a BIOS LBA
+ translation was active, so the default
+ hardware geometry is OK */
+ lba_detected = 1;
+ goto default_geometry;
+ } else {
+ *pcyls = cylinders;
+ *pheads = heads;
+ *psecs = secs;
+ /* disable any translation to be in sync with
+ the logical geometry */
+ if (translation == BIOS_ATA_TRANSLATION_AUTO) {
+ bdrv_set_translation_hint(bs,
+ BIOS_ATA_TRANSLATION_NONE);
+ }
+ }
+ } else {
+ default_geometry:
+ /* if no geometry, use a standard physical disk geometry */
+ cylinders = nb_sectors / (16 * 63);
+
+ if (cylinders > 16383)
+ cylinders = 16383;
+ else if (cylinders < 2)
+ cylinders = 2;
+ *pcyls = cylinders;
+ *pheads = 16;
+ *psecs = 63;
+ if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
+ if ((*pcyls * *pheads) <= 131072) {
+ bdrv_set_translation_hint(bs,
+ BIOS_ATA_TRANSLATION_LARGE);
+ } else {
+ bdrv_set_translation_hint(bs,
+ BIOS_ATA_TRANSLATION_LBA);
+ }
+ }
+ }
+ bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
}
}
diff --git a/bsd-user/bsd-mman.h b/bsd-user/bsd-mman.h
new file mode 100644
index 000000000..910e8c192
--- /dev/null
+++ b/bsd-user/bsd-mman.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mman.h 8.2 (Berkeley) 1/9/95
+ * $FreeBSD: src/sys/sys/mman.h,v 1.42 2008/03/28 04:29:27 ps Exp $
+ */
+
+#define TARGET_FREEBSD_MAP_RESERVED0080 0x0080 /* previously misimplemented MAP_INHERIT */
+#define TARGET_FREEBSD_MAP_RESERVED0100 0x0100 /* previously unimplemented MAP_NOEXTEND */
+#define TARGET_FREEBSD_MAP_STACK 0x0400 /* region grows down, like a stack */
+#define TARGET_FREEBSD_MAP_NOSYNC 0x0800 /* page to but do not sync underlying file */
+
+#define TARGET_FREEBSD_MAP_FLAGMASK 0x1ff7
+
+/* $NetBSD: mman.h,v 1.42 2008/11/18 22:13:49 ad Exp $ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mman.h 8.2 (Berkeley) 1/9/95
+ */
+#define TARGET_NETBSD_MAP_INHERIT 0x0080 /* region is retained after exec */
+#define TARGET_NETBSD_MAP_TRYFIXED 0x0400 /* attempt hint address, even within break */
+#define TARGET_NETBSD_MAP_WIRED 0x0800 /* mlock() mapping when it is established */
+
+#define TARGET_NETBSD_MAP_STACK 0x2000 /* allocated from memory, swap space (stack) */
+
+#define TARGET_NETBSD_MAP_FLAGMASK 0x3ff7
+
+/* $OpenBSD: mman.h,v 1.18 2003/07/21 22:52:19 tedu Exp $ */
+/* $NetBSD: mman.h,v 1.11 1995/03/26 20:24:23 jtc Exp $ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mman.h 8.1 (Berkeley) 6/2/93
+ */
+
+#define TARGET_OPENBSD_MAP_INHERIT 0x0080 /* region is retained after exec */
+#define TARGET_OPENBSD_MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */
+#define TARGET_OPENBSD_MAP_TRYFIXED 0x0400 /* attempt hint address, even within heap */
+
+#define TARGET_OPENBSD_MAP_FLAGMASK 0x17f7
+
+// XXX
+#define TARGET_BSD_MAP_FLAGMASK 0x3ff7
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 754268ce7..94c5fbee5 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -27,6 +27,7 @@
#include "qemu.h"
#include "qemu-common.h"
+#include "bsd-mman.h"
//#define DEBUG_MMAP
@@ -223,7 +224,7 @@ static int mmap_frag(abi_ulong real_start,
if (!(flags & MAP_ANON)) {
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
- if ((flags & MAP_FLAGMASK) == MAP_SHARED &&
+ if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
(prot & PROT_WRITE))
return -EINVAL;
@@ -323,7 +324,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
printf("MAP_FIXED ");
if (flags & MAP_ANON)
printf("MAP_ANON ");
- switch(flags & MAP_FLAGMASK) {
+ switch(flags & TARGET_BSD_MAP_FLAGMASK) {
case MAP_PRIVATE:
printf("MAP_PRIVATE ");
break;
@@ -331,7 +332,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
printf("MAP_SHARED ");
break;
default:
- printf("[MAP_FLAGMASK=0x%x] ", flags & MAP_FLAGMASK);
+ printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK);
break;
}
printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
@@ -396,7 +397,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
- if ((flags & MAP_FLAGMASK) == MAP_SHARED &&
+ if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
(prot & PROT_WRITE)) {
errno = EINVAL;
goto fail;
diff --git a/configure b/configure
index 18ef980d2..63a85d695 100755
--- a/configure
+++ b/configure
@@ -16,6 +16,9 @@ TMPO="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.o"
TMPE="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}"
TMPS="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.S"
TMPI="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.i"
+TMPSDLLOG="${TMPDIR1}/qemu-conf-sdl-$$-${RANDOM}.log"
+
+trap "rm -f $TMPC $TMPO $TMPE $TMPS $TMPI $TMPSDLLOG; exit" 0 2 3 15
# default parameters
prefix=""
@@ -120,6 +123,7 @@ kvm_cap_pit="no"
kvm_cap_device_assignment="no"
kerneldir=""
aix="no"
+blobs="yes"
signalfd="no"
eventfd="no"
cpu_emulation="yes"
@@ -389,6 +393,8 @@ for opt do
;;
--disable-aio) aio="no"
;;
+ --disable-blobs) blobs="no"
+ ;;
--kerneldir=*) kerneldir="$optarg"
;;
--disable-cpu-emulation) cpu_emulation="no"
@@ -504,6 +510,7 @@ echo " --enable-uname-release=R Return R for uname -r in usermode emulation"
echo " --sparc_cpu=V Build qemu for Sparc architecture v7, v8, v8plus, v8plusa, v9"
echo " --disable-vde disable support for vde network"
echo " --disable-aio disable AIO support"
+echo " --disable-blobs disable installing provided firmware blobs"
echo " --kerneldir=PATH look for kernel includes in PATH"
echo " --disable-cpu-emulation disables use of qemu cpu emulation code"
echo " --disable-libfdt disables use of libfdt support for device tree"
@@ -840,7 +847,7 @@ cat > $TMPC << EOF
#undef main /* We don't want SDL to override our main() */
int main( void ) { return SDL_Init (SDL_INIT_VIDEO); }
EOF
- if $cc $ARCH_CFLAGS -o $TMPE ${OS_CFLAGS} `$sdl_config --cflags 2> /dev/null` $TMPC `$sdl_config --libs 2> /dev/null` 2> /tmp/qemu-$$-sdl-config.log ; then
+ if $cc $ARCH_CFLAGS -o $TMPE ${OS_CFLAGS} `$sdl_config --cflags 2> /dev/null` $TMPC `$sdl_config --libs 2> /dev/null` 2> $TMPSDLLOG ; then
_sdlversion=`$sdl_config --version | sed 's/[^0-9]//g'`
if test "$_sdlversion" -lt 121 ; then
sdl_too_old=yes
@@ -1195,16 +1202,16 @@ echo "uname -r $uname_release"
echo "NPTL support $nptl"
echo "vde support $vde"
echo "AIO support $aio"
+echo "Install blobs $blobs"
echo "KVM support $kvm"
if test $sdl_too_old = "yes"; then
echo "-> Your SDL version is too old - please upgrade to have SDL support"
fi
-if [ -s /tmp/qemu-$$-sdl-config.log ]; then
+if [ -s $TMPSDLLOG ]; then
echo "The error log from compiling the libSDL test is: "
- cat /tmp/qemu-$$-sdl-config.log
+ cat $TMPSDLLOG
fi
-rm -f /tmp/qemu-$$-sdl-config.log
#if test "$sdl_static" = "no"; then
# echo "WARNING: cannot compile statically with SDL - qemu-fast won't have a graphical output"
#fi
@@ -1470,6 +1477,9 @@ if test "$aio" = "yes" ; then
echo "#define CONFIG_AIO 1" >> $config_h
echo "CONFIG_AIO=yes" >> $config_mak
fi
+if test "$blobs" = "yes" ; then
+ echo "INSTALL_BLOBS=yes" >> $config_mak
+fi
if test "$signalfd" = "yes" ; then
echo "#define CONFIG_signalfd 1" >> $config_h
fi
@@ -1882,5 +1892,3 @@ if test "$source_path_used" = "yes" ; then
ln -s $source_path/$f $f
done
fi
-
-rm -f $TMPO $TMPC $TMPE $TMPS $TMPI
diff --git a/console.c b/console.c
index bec9946c4..4e088d7a6 100644
--- a/console.c
+++ b/console.c
@@ -190,7 +190,7 @@ static unsigned int vga_get_color(DisplayState *ds, unsigned int rgba)
{
unsigned int r, g, b, color;
- switch(ds->depth) {
+ switch(ds_get_bits_per_pixel(ds)) {
#if 0
case 8:
r = (rgba >> 16) & 0xff;
@@ -227,9 +227,9 @@ static void vga_fill_rect (DisplayState *ds,
uint8_t *d, *d1;
int x, y, bpp;
- bpp = (ds->depth + 7) >> 3;
- d1 = ds->data +
- ds->linesize * posy + bpp * posx;
+ bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;
+ d1 = ds_get_data(ds) +
+ ds_get_linesize(ds) * posy + bpp * posx;
for (y = 0; y < height; y++) {
d = d1;
switch(bpp) {
@@ -252,7 +252,7 @@ static void vga_fill_rect (DisplayState *ds,
}
break;
}
- d1 += ds->linesize;
+ d1 += ds_get_linesize(ds);
}
}
@@ -263,27 +263,27 @@ static void vga_bitblt(DisplayState *ds, int xs, int ys, int xd, int yd, int w,
uint8_t *d;
int wb, y, bpp;
- bpp = (ds->depth + 7) >> 3;
+ bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;
wb = w * bpp;
if (yd <= ys) {
- s = ds->data +
- ds->linesize * ys + bpp * xs;
- d = ds->data +
- ds->linesize * yd + bpp * xd;
+ s = ds_get_data(ds) +
+ ds_get_linesize(ds) * ys + bpp * xs;
+ d = ds_get_data(ds) +
+ ds_get_linesize(ds) * yd + bpp * xd;
for (y = 0; y < h; y++) {
memmove(d, s, wb);
- d += ds->linesize;
- s += ds->linesize;
+ d += ds_get_linesize(ds);
+ s += ds_get_linesize(ds);
}
} else {
- s = ds->data +
- ds->linesize * (ys + h - 1) + bpp * xs;
- d = ds->data +
- ds->linesize * (yd + h - 1) + bpp * xd;
+ s = ds_get_data(ds) +
+ ds_get_linesize(ds) * (ys + h - 1) + bpp * xs;
+ d = ds_get_data(ds) +
+ ds_get_linesize(ds) * (yd + h - 1) + bpp * xd;
for (y = 0; y < h; y++) {
memmove(d, s, wb);
- d -= ds->linesize;
- s -= ds->linesize;
+ d -= ds_get_linesize(ds);
+ s -= ds_get_linesize(ds);
}
}
}
@@ -373,7 +373,7 @@ static const uint32_t color_table_rgb[2][8] = {
static inline unsigned int col_expand(DisplayState *ds, unsigned int col)
{
- switch(ds->depth) {
+ switch(ds_get_bits_per_pixel(ds)) {
case 8:
col |= col << 8;
col |= col << 16;
@@ -443,13 +443,13 @@ static void vga_putcharxy(DisplayState *ds, int x, int y, int ch,
bgcol = color_table[t_attrib->bold][t_attrib->bgcol];
}
- bpp = (ds->depth + 7) >> 3;
- d = ds->data +
- ds->linesize * y * FONT_HEIGHT + bpp * x * FONT_WIDTH;
- linesize = ds->linesize;
+ bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;
+ d = ds_get_data(ds) +
+ ds_get_linesize(ds) * y * FONT_HEIGHT + bpp * x * FONT_WIDTH;
+ linesize = ds_get_linesize(ds);
font_ptr = vgafont16 + FONT_HEIGHT * ch;
xorcol = bgcol ^ fgcol;
- switch(ds->depth) {
+ switch(ds_get_bits_per_pixel(ds)) {
case 8:
for(i = 0; i < FONT_HEIGHT; i++) {
font_data = *font_ptr++;
@@ -543,7 +543,7 @@ static void update_xy(TextConsole *s, int x, int y)
int y1, y2;
if (s == active_console) {
- if (!s->ds->depth) {
+ if (!ds_get_bits_per_pixel(s->ds)) {
text_update_xy(s, x, y);
return;
}
@@ -570,7 +570,7 @@ static void console_show_cursor(TextConsole *s, int show)
if (s == active_console) {
int x = s->x;
- if (!s->ds->depth) {
+ if (!ds_get_bits_per_pixel(s->ds)) {
s->cursor_invalidate = 1;
return;
}
@@ -604,7 +604,7 @@ static void console_refresh(TextConsole *s)
if (s != active_console)
return;
- if (!s->ds->depth) {
+ if (!ds_get_bits_per_pixel(s->ds)) {
s->text_x[0] = 0;
s->text_y[0] = 0;
s->text_x[1] = s->width - 1;
@@ -613,7 +613,7 @@ static void console_refresh(TextConsole *s)
return;
}
- vga_fill_rect(s->ds, 0, 0, s->ds->width, s->ds->height,
+ vga_fill_rect(s->ds, 0, 0, ds_get_width(s->ds), ds_get_height(s->ds),
color_table[0][COLOR_BLACK]);
y1 = s->y_displayed;
for(y = 0; y < s->height; y++) {
@@ -626,7 +626,7 @@ static void console_refresh(TextConsole *s)
if (++y1 == s->total_height)
y1 = 0;
}
- dpy_update(s->ds, 0, 0, s->ds->width, s->ds->height);
+ dpy_update(s->ds, 0, 0, ds_get_width(s->ds), ds_get_height(s->ds));
console_show_cursor(s, 1);
}
@@ -689,7 +689,7 @@ static void console_put_lf(TextConsole *s)
c++;
}
if (s == active_console && s->y_displayed == s->y_base) {
- if (!s->ds->depth) {
+ if (!ds_get_bits_per_pixel(s->ds)) {
s->text_x[0] = 0;
s->text_y[0] = 0;
s->text_x[1] = s->width - 1;
@@ -1048,7 +1048,7 @@ void console_select(unsigned int index)
if (s) {
active_console = s;
if (s->console_type != TEXT_CONSOLE && s->g_width && s->g_height
- && (s->g_width != s->ds->width || s->g_height != s->ds->height))
+ && (s->g_width != ds_get_width(s->ds) || s->g_height != ds_get_height(s->ds)))
dpy_resize(s->ds, s->g_width, s->g_height);
vga_hw_invalidate();
}
@@ -1158,12 +1158,12 @@ static void text_console_invalidate(void *opaque)
{
TextConsole *s = (TextConsole *) opaque;
- if (s->g_width != s->ds->width || s->g_height != s->ds->height) {
+ if (s->g_width != ds_get_width(s->ds) || s->g_height != ds_get_height(s->ds)) {
if (s->console_type == TEXT_CONSOLE_FIXED_SIZE)
dpy_resize(s->ds, s->g_width, s->g_height);
else {
- s->g_width = s->ds->width;
- s->g_height = s->ds->height;
+ s->g_width = ds_get_width(s->ds);
+ s->g_height = ds_get_height(s->ds);
text_console_resize(s);
}
}
@@ -1302,8 +1302,8 @@ CharDriverState *text_console_init(DisplayState *ds, const char *p)
s->total_height = DEFAULT_BACKSCROLL;
s->x = 0;
s->y = 0;
- width = s->ds->width;
- height = s->ds->height;
+ width = ds_get_width(s->ds);
+ height = ds_get_height(s->ds);
if (p != 0) {
width = strtoul(p, (char **)&p, 10);
if (*p == 'C') {
@@ -1347,7 +1347,7 @@ CharDriverState *text_console_init(DisplayState *ds, const char *p)
void qemu_console_resize(QEMUConsole *console, int width, int height)
{
if (console->g_width != width || console->g_height != height
- || !console->ds->data) {
+ || !ds_get_data(console->ds)) {
console->g_width = width;
console->g_height = height;
if (active_console == console) {
diff --git a/console.h b/console.h
index 8e2d5b290..9d6c5e162 100644
--- a/console.h
+++ b/console.h
@@ -114,6 +114,31 @@ static inline void dpy_cursor(DisplayState *s, int x, int y)
s->dpy_text_cursor(s, x, y);
}
+static inline int ds_get_linesize(DisplayState *ds)
+{
+ return ds->linesize;
+}
+
+static inline uint8_t* ds_get_data(DisplayState *ds)
+{
+ return ds->data;
+}
+
+static inline int ds_get_width(DisplayState *ds)
+{
+ return ds->width;
+}
+
+static inline int ds_get_height(DisplayState *ds)
+{
+ return ds->height;
+}
+
+static inline int ds_get_bits_per_pixel(DisplayState *ds)
+{
+ return ds->depth;
+}
+
typedef unsigned long console_ch_t;
static inline void console_write_ch(console_ch_t *dest, uint32_t ch)
{
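
Note: the console.h hunk above introduces accessor helpers for DisplayState; the rest of this merge converts direct field access (ds->depth, ds->data, ds->linesize, ...) to them. A minimal sketch of the intended usage, assuming the usual console.h/qemu-common includes; clear_display is an illustrative name, not part of the patch:

/* Clear the whole display surface, going through the new accessors only. */
static void clear_display(DisplayState *ds)
{
    int bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;   /* bytes per pixel */
    uint8_t *row = ds_get_data(ds);
    int y;

    for (y = 0; y < ds_get_height(ds); y++) {
        memset(row, 0, ds_get_width(ds) * bpp);
        row += ds_get_linesize(ds);                   /* stride, not width * bpp */
    }
    dpy_update(ds, 0, 0, ds_get_width(ds), ds_get_height(ds));
}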
diff --git a/cpu-all.h b/cpu-all.h
index 9907b871f..e9517d6dc 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -950,6 +950,8 @@ int cpu_physical_memory_set_dirty_tracking(int enable);
int cpu_physical_memory_get_dirty_tracking(void);
+void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
+
void dump_exec_info(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
diff --git a/cpu-defs.h b/cpu-defs.h
index 902d2b48e..6ca84b2ef 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -29,6 +29,7 @@
#include <inttypes.h>
#include <pthread.h>
#include "osdep.h"
+#include "sys-queue.h"
#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
@@ -115,7 +116,7 @@ typedef struct CPUTLBEntry {
target_ulong addr_write;
target_ulong addr_code;
/* Addend to virtual address to get physical address. IO accesses
- use the correcponding iotlb value. */
+ use the corresponding iotlb value. */
#if TARGET_PHYS_ADDR_BITS == 64
/* on i386 Linux make sure it is aligned */
target_phys_addr_t addend __attribute__((aligned(8)));
@@ -147,14 +148,14 @@ struct KVMState;
typedef struct CPUBreakpoint {
target_ulong pc;
int flags; /* BP_* */
- struct CPUBreakpoint *prev, *next;
+ TAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
typedef struct CPUWatchpoint {
target_ulong vaddr;
target_ulong len_mask;
int flags; /* BP_* */
- struct CPUWatchpoint *prev, *next;
+ TAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
/* forward decleration */
@@ -203,10 +204,10 @@ struct KVMCPUState {
\
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
- CPUBreakpoint *breakpoints; \
+ TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
int singlestep_enabled; \
\
- CPUWatchpoint *watchpoints; \
+ TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
CPUWatchpoint *watchpoint_hit; \
\
struct GDBRegisterState *gdb_regs; \
diff --git a/cpu-exec.c b/cpu-exec.c
index 537717274..d5eb16cb4 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -202,7 +202,7 @@ static void cpu_handle_debug_exception(CPUState *env)
CPUWatchpoint *wp;
if (!env->watchpoint_hit)
- for (wp = env->watchpoints; wp != NULL; wp = wp->next)
+ TAILQ_FOREACH(wp, &env->watchpoints, entry)
wp->flags &= ~BP_WATCHPOINT_HIT;
if (debug_excp_handler)
diff --git a/exec-all.h b/exec-all.h
index aec318ba8..ca97f575e 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -82,6 +82,7 @@ TranslationBlock *tb_gen_code(CPUState *env,
target_ulong pc, target_ulong cs_base, int flags,
int cflags);
void cpu_exec_init(CPUState *env);
+void cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
int is_cpu_write_access);
diff --git a/exec.c b/exec.c
index 8122d88fe..c699043ed 100644
--- a/exec.c
+++ b/exec.c
@@ -548,6 +548,8 @@ void cpu_exec_init(CPUState *env)
cpu_index++;
}
env->cpu_index = cpu_index;
+ TAILQ_INIT(&env->breakpoints);
+ TAILQ_INIT(&env->watchpoints);
#ifdef __WIN32
env->thread_id = GetCurrentProcessId();
#else
@@ -1318,7 +1320,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
int flags, CPUWatchpoint **watchpoint)
{
target_ulong len_mask = ~(len - 1);
- CPUWatchpoint *wp, *prev_wp;
+ CPUWatchpoint *wp;
/* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
@@ -1335,25 +1337,10 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
wp->flags = flags;
/* keep all GDB-injected watchpoints in front */
- if (!(flags & BP_GDB) && env->watchpoints) {
- prev_wp = env->watchpoints;
- while (prev_wp->next != NULL && (prev_wp->next->flags & BP_GDB))
- prev_wp = prev_wp->next;
- } else {
- prev_wp = NULL;
- }
-
- /* Insert new watchpoint */
- if (prev_wp) {
- wp->next = prev_wp->next;
- prev_wp->next = wp;
- } else {
- wp->next = env->watchpoints;
- env->watchpoints = wp;
- }
- if (wp->next)
- wp->next->prev = wp;
- wp->prev = prev_wp;
+ if (flags & BP_GDB)
+ TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ else
+ TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
tlb_flush_page(env, addr);
@@ -1369,7 +1356,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
target_ulong len_mask = ~(len - 1);
CPUWatchpoint *wp;
- for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(env, wp);
@@ -1382,12 +1369,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
- if (watchpoint->next)
- watchpoint->next->prev = watchpoint->prev;
- if (watchpoint->prev)
- watchpoint->prev->next = watchpoint->next;
- else
- env->watchpoints = watchpoint->next;
+ TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
tlb_flush_page(env, watchpoint->vaddr);
@@ -1397,11 +1379,12 @@ void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
- CPUWatchpoint *wp;
+ CPUWatchpoint *wp, *next;
- for (wp = env->watchpoints; wp != NULL; wp = wp->next)
+ TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
if (wp->flags & mask)
cpu_watchpoint_remove_by_ref(env, wp);
+ }
}
/* Add a breakpoint. */
@@ -1409,7 +1392,7 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
- CPUBreakpoint *bp, *prev_bp;
+ CPUBreakpoint *bp;
bp = qemu_malloc(sizeof(*bp));
if (!bp)
@@ -1419,25 +1402,10 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
bp->flags = flags;
/* keep all GDB-injected breakpoints in front */
- if (!(flags & BP_GDB) && env->breakpoints) {
- prev_bp = env->breakpoints;
- while (prev_bp->next != NULL && (prev_bp->next->flags & BP_GDB))
- prev_bp = prev_bp->next;
- } else {
- prev_bp = NULL;
- }
-
- /* Insert new breakpoint */
- if (prev_bp) {
- bp->next = prev_bp->next;
- prev_bp->next = bp;
- } else {
- bp->next = env->breakpoints;
- env->breakpoints = bp;
- }
- if (bp->next)
- bp->next->prev = bp;
- bp->prev = prev_bp;
+ if (flags & BP_GDB)
+ TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ else
+ TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
if (kvm_enabled())
kvm_update_debugger(env);
@@ -1458,7 +1426,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(env, bp);
return 0;
@@ -1474,12 +1442,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- if (breakpoint->next)
- breakpoint->next->prev = breakpoint->prev;
- if (breakpoint->prev)
- breakpoint->prev->next = breakpoint->next;
- else
- env->breakpoints = breakpoint->next;
+ TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
if (kvm_enabled())
kvm_update_debugger(env);
@@ -1494,11 +1457,12 @@ void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
- CPUBreakpoint *bp;
+ CPUBreakpoint *bp, *next;
- for (bp = env->breakpoints; bp != NULL; bp = bp->next)
+ TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
if (bp->flags & mask)
cpu_breakpoint_remove_by_ref(env, bp);
+ }
#endif
}
@@ -1917,6 +1881,12 @@ int cpu_physical_memory_get_dirty_tracking(void)
return in_migration;
}
+void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
+{
+ if (kvm_enabled())
+ kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+}
+
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
ram_addr_t ram_addr;
@@ -2029,7 +1999,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
@@ -2576,7 +2546,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
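
Note: the exec.c hunks above replace the hand-rolled prev/next pointers on CPUBreakpoint/CPUWatchpoint with the BSD TAILQ macros from sys-queue.h. A minimal sketch of the pattern, assuming sys-queue.h is included; struct item and remove_matching are illustrative names, not part of the patch:

struct item {
    int flags;
    TAILQ_ENTRY(item) entry;                 /* embedded prev/next links */
};
TAILQ_HEAD(item_list, item);                 /* declares struct item_list */

static void remove_matching(struct item_list *list, int mask)
{
    struct item *it, *next;
    /* _SAFE variant: the loop body may unlink the current element */
    TAILQ_FOREACH_SAFE(it, list, entry, next) {
        if (it->flags & mask)
            TAILQ_REMOVE(list, it, entry);
    }
}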
diff --git a/gdbstub.c b/gdbstub.c
index 33d17f62a..b02571a95 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -429,7 +429,7 @@ static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
GET_REGL(env->gpr[n]);
} else if (n < 64) {
/* fprs */
- stfq_p(mem_buf, env->fpr[n]);
+ stfq_p(mem_buf, env->fpr[n-32]);
return 8;
} else {
switch (n) {
@@ -460,7 +460,7 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
return sizeof(target_ulong);
} else if (n < 64) {
/* fprs */
- env->fpr[n] = ldfq_p(mem_buf);
+ env->fpr[n-32] = ldfq_p(mem_buf);
return 8;
} else {
switch (n) {
diff --git a/hw/blizzard.c b/hw/blizzard.c
index 4fb005e36..30641f5dc 100644
--- a/hw/blizzard.c
+++ b/hw/blizzard.c
@@ -166,7 +166,7 @@ static void blizzard_window(struct blizzard_s *s)
s->my[1] = s->data.y + s->data.dy;
bypp[0] = s->bpp;
- bypp[1] = (s->state->depth + 7) >> 3;
+ bypp[1] = (ds_get_bits_per_pixel(s->state) + 7) >> 3;
bypl[0] = bypp[0] * s->data.pitch;
bypl[1] = bypp[1] * s->x;
bypl[2] = bypp[0] * s->data.dx;
@@ -895,7 +895,7 @@ static void blizzard_update_display(void *opaque)
if (!s->enable)
return;
- if (s->x != s->state->width || s->y != s->state->height) {
+ if (s->x != ds_get_width(s->state) || s->y != ds_get_height(s->state)) {
s->invalidate = 1;
qemu_console_resize(s->console, s->x, s->y);
}
@@ -904,8 +904,8 @@ static void blizzard_update_display(void *opaque)
s->invalidate = 0;
if (s->blank) {
- bypp = (s->state->depth + 7) >> 3;
- memset(s->state->data, 0, bypp * s->x * s->y);
+ bypp = (ds_get_bits_per_pixel(s->state) + 7) >> 3;
+ memset(ds_get_data(s->state), 0, bypp * s->x * s->y);
return;
}
@@ -918,12 +918,12 @@ static void blizzard_update_display(void *opaque)
if (s->mx[1] <= s->mx[0])
return;
- bypp = (s->state->depth + 7) >> 3;
+ bypp = (ds_get_bits_per_pixel(s->state) + 7) >> 3;
bypl = bypp * s->x;
bwidth = bypp * (s->mx[1] - s->mx[0]);
y = s->my[0];
src = s->fb + bypl * y + bypp * s->mx[0];
- dst = s->state->data + bypl * y + bypp * s->mx[0];
+ dst = ds_get_data(s->state) + bypl * y + bypp * s->mx[0];
for (; y < s->my[1]; y ++, src += bypl, dst += bypl)
memcpy(dst, src, bwidth);
@@ -940,8 +940,8 @@ static void blizzard_screen_dump(void *opaque, const char *filename) {
struct blizzard_s *s = (struct blizzard_s *) opaque;
blizzard_update_display(opaque);
- if (s && s->state->data)
- ppm_save(filename, s->state->data, s->x, s->y, s->state->linesize);
+ if (s && ds_get_data(s->state))
+ ppm_save(filename, ds_get_data(s->state), s->x, s->y, ds_get_linesize(s->state));
}
#define DEPTH 8
@@ -962,7 +962,7 @@ void *s1d13745_init(qemu_irq gpio_int, DisplayState *ds)
s->state = ds;
s->fb = qemu_malloc(0x180000);
- switch (s->state->depth) {
+ switch (ds_get_bits_per_pixel(s->state)) {
case 0:
s->line_fn_tab[0] = s->line_fn_tab[1] =
qemu_mallocz(sizeof(blizzard_fn_t) * 0x10);
diff --git a/hw/cirrus_vga.c b/hw/cirrus_vga.c
index 3c82ebe84..1924b4333 100644
--- a/hw/cirrus_vga.c
+++ b/hw/cirrus_vga.c
@@ -31,9 +31,7 @@
#include "pci.h"
#include "console.h"
#include "vga_int.h"
-#ifndef _WIN32
-#include <sys/mman.h>
-#endif
+#include "kvm.h"
#include "qemu-kvm.h"
/*
@@ -1234,6 +1232,12 @@ static void cirrus_update_bank_ptr(CirrusVGAState * s, unsigned bank_index)
}
if (limit > 0) {
+ /* Thinking about changing bank base? First, drop the dirty bitmap information
+ * on the current location, otherwise we lose this pointer forever */
+ if (s->lfb_vram_mapped) {
+ target_phys_addr_t base_addr = isa_mem_base + 0xa0000 + bank_index * 0x8000;
+ cpu_physical_sync_dirty_bitmap(base_addr, base_addr + 0x8000);
+ }
s->cirrus_bank_base[bank_index] = offset;
s->cirrus_bank_limit[bank_index] = limit;
} else {
@@ -1362,6 +1366,7 @@ cirrus_hook_write_sr(CirrusVGAState * s, unsigned reg_index, int reg_value)
s->hw_cursor_y = (reg_value << 3) | (reg_index >> 5);
break;
case 0x07: // Extended Sequencer Mode
+ cirrus_update_memory_access(s);
case 0x08: // EEPROM Control
case 0x09: // Scratch Register 0
case 0x0a: // Scratch Register 1
@@ -2330,9 +2335,9 @@ static void cirrus_cursor_draw_line(VGAState *s1, uint8_t *d1, int scr_y)
color1 = s->rgb_to_pixel(c6_to_8(palette[0xf * 3]),
c6_to_8(palette[0xf * 3 + 1]),
c6_to_8(palette[0xf * 3 + 2]));
- bpp = ((s->ds->depth + 7) >> 3);
+ bpp = ((ds_get_bits_per_pixel(s->ds) + 7) >> 3);
d1 += x1 * bpp;
- switch(s->ds->depth) {
+ switch(ds_get_bits_per_pixel(s->ds)) {
default:
break;
case 8:
@@ -2627,76 +2632,56 @@ static CPUWriteMemoryFunc *cirrus_linear_bitblt_write[3] = {
cirrus_linear_bitblt_writel,
};
-void set_vram_mapping(void *ptr, unsigned long begin, unsigned long end)
+static void map_linear_vram(CirrusVGAState *s)
{
- /* align begin and end address */
- begin = begin & TARGET_PAGE_MASK;
- end = begin + VGA_RAM_SIZE;
- end = (end + TARGET_PAGE_SIZE -1 ) & TARGET_PAGE_MASK;
- if (kvm_enabled()) {
- kvm_cpu_register_physical_memory(begin, end - begin,
- ptr - (void *)phys_ram_base);
- kvm_qemu_log_memory(begin, end - begin, 1);
+ if (!s->map_addr && s->lfb_addr && s->lfb_end) {
+ s->map_addr = s->lfb_addr;
+ s->map_end = s->lfb_end;
+ cpu_register_physical_memory(s->map_addr, s->map_end - s->map_addr, s->vram_offset);
+ vga_dirty_log_start((VGAState *)s);
}
-}
-void unset_vram_mapping(void *ptr, unsigned long begin, unsigned long end)
-{
- /* align begin and end address */
- end = begin + VGA_RAM_SIZE;
- begin = begin & TARGET_PAGE_MASK;
- end = (end + TARGET_PAGE_SIZE -1 ) & TARGET_PAGE_MASK;
+ if (!s->map_addr)
+ return;
- if (kvm_enabled()) {
- kvm_qemu_log_memory(begin, end - begin, 0);
- kvm_cpu_unregister_physical_memory(begin, end - begin,
- ptr - (void *)phys_ram_base);
- }
-}
+ s->lfb_vram_mapped = 0;
-#ifdef CONFIG_X86
-static void kvm_update_vga_alias(CirrusVGAState *s, int ok, int bank)
-{
- unsigned limit, base;
+ if (!(s->cirrus_srcptr != s->cirrus_srcptr_end)
+ && !((s->sr[0x07] & 0x01) == 0)
+ && !((s->gr[0x0B] & 0x14) == 0x14)
+ && !(s->gr[0x0B] & 0x02)) {
- if (!ok && !s->aliases_enabled)
- return;
- limit = s->cirrus_bank_limit[bank];
- if (limit > 0x8000)
- limit = 0x8000;
- base = s->cirrus_lfb_addr + s->cirrus_bank_base[bank];
- if (ok) {
- if (!s->aliases_enabled
- || base != s->aliased_bank_base[bank]
- || limit != s->aliased_bank_limit[bank]) {
- kvm_qemu_create_memory_alias(0xa0000 + bank * 0x8000,
- limit, base);
- s->aliased_bank_base[bank] = base;
- s->aliased_bank_limit[bank] = limit;
- }
- } else {
- kvm_qemu_destroy_memory_alias(0xa0000 + bank * 0x8000);
+ cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x8000,
+ (s->vram_offset + s->cirrus_bank_base[0]) | IO_MEM_RAM);
+ cpu_register_physical_memory(isa_mem_base + 0xa8000, 0x8000,
+ (s->vram_offset + s->cirrus_bank_base[1]) | IO_MEM_RAM);
+
+ s->lfb_vram_mapped = 1;
+ vga_dirty_log_start((VGAState *)s);
}
+ else {
+ cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x8000, s->vga_io_memory);
+ cpu_register_physical_memory(isa_mem_base + 0xa8000, 0x8000, s->vga_io_memory);
+ }
+
}
-static void kvm_update_vga_aliases(CirrusVGAState *s, int ok)
+static void unmap_linear_vram(CirrusVGAState *s)
{
- if (kvm_enabled()) {
- kvm_update_vga_alias(s, ok, 0);
- kvm_update_vga_alias(s, ok, 1);
+ if (s->map_addr && s->lfb_addr && s->lfb_end) {
+ vga_dirty_log_stop((VGAState *)s);
+ s->map_addr = s->map_end = 0;
}
- s->aliases_enabled = ok;
+
+ cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x20000,
+ s->vga_io_memory);
}
-#endif
/* Compute the memory access functions */
static void cirrus_update_memory_access(CirrusVGAState *s)
{
unsigned mode;
-#ifdef CONFIG_X86
- int want_vga_alias = 0;
-#endif
if ((s->sr[0x17] & 0x44) == 0x44) {
goto generic_io;
@@ -2711,42 +2696,18 @@ static void cirrus_update_memory_access(CirrusVGAState *s)
mode = s->gr[0x05] & 0x7;
if (mode < 4 || mode > 5 || ((s->gr[0x0B] & 0x4) == 0)) {
- if (kvm_enabled() && s->cirrus_lfb_addr && s->cirrus_lfb_end &&
- !s->map_addr) {
- set_vram_mapping(s->vram_ptr,
- s->cirrus_lfb_addr, s->cirrus_lfb_end);
- s->map_addr = s->cirrus_lfb_addr;
- s->map_end = s->cirrus_lfb_end;
- }
-#ifdef CONFIG_X86
- if (kvm_enabled()
- && !(s->cirrus_srcptr != s->cirrus_srcptr_end)
- && !((s->sr[0x07] & 0x01) == 0)
- && !((s->gr[0x0B] & 0x14) == 0x14)
- && !(s->gr[0x0B] & 0x02))
- want_vga_alias = 1;
-#endif
+ map_linear_vram(s);
s->cirrus_linear_write[0] = cirrus_linear_mem_writeb;
s->cirrus_linear_write[1] = cirrus_linear_mem_writew;
s->cirrus_linear_write[2] = cirrus_linear_mem_writel;
} else {
generic_io:
- if (kvm_enabled() && s->cirrus_lfb_addr && s->cirrus_lfb_end &&
- s->map_addr) {
- unset_vram_mapping(s->vram_ptr,
- s->cirrus_lfb_addr,
- s->cirrus_lfb_end);
- s->map_addr = s->map_end = 0;
- }
+ unmap_linear_vram(s);
s->cirrus_linear_write[0] = cirrus_linear_writeb;
s->cirrus_linear_write[1] = cirrus_linear_writew;
s->cirrus_linear_write[2] = cirrus_linear_writel;
}
}
-#if defined(CONFIG_X86)
- kvm_update_vga_aliases(s, want_vga_alias);
-#endif
-
}
@@ -3208,20 +3169,7 @@ static int cirrus_vga_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_be32s(f, &s->hw_cursor_x);
qemu_get_be32s(f, &s->hw_cursor_y);
- if (kvm_enabled()) {
- int real_vram_size;
- qemu_get_be32s(f, &real_vram_size);
- if (real_vram_size != s->real_vram_size) {
- if (real_vram_size > s->real_vram_size)
- real_vram_size = s->real_vram_size;
- printf("%s: REAL_VRAM_SIZE MISMATCH !!!!!! SAVED=%d CURRENT=%d",
- __FUNCTION__, real_vram_size, s->real_vram_size);
- }
- qemu_get_buffer(f, s->vram_ptr, real_vram_size);
- cirrus_update_memory_access(s);
- }
-
-
+ cirrus_update_memory_access(s);
/* force refresh */
s->graphic_mode = -1;
cirrus_update_bank_ptr(s, 0);
@@ -3237,7 +3185,7 @@ static int cirrus_vga_load(QEMUFile *f, void *opaque, int version_id)
static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci)
{
- int vga_io_memory, i;
+ int i;
static int inited;
if (!inited) {
@@ -3276,10 +3224,10 @@ static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci)
register_ioport_read(0x3ba, 1, 1, vga_ioport_read, s);
register_ioport_read(0x3da, 1, 1, vga_ioport_read, s);
- vga_io_memory = cpu_register_io_memory(0, cirrus_vga_mem_read,
+ s->vga_io_memory = cpu_register_io_memory(0, cirrus_vga_mem_read,
cirrus_vga_mem_write, s);
cpu_register_physical_memory(isa_mem_base + 0x000a0000, 0x20000,
- vga_io_memory);
+ s->vga_io_memory);
if (kvm_enabled())
qemu_kvm_register_coalesced_mmio(isa_mem_base + 0x000a0000, 0x20000);
@@ -3353,7 +3301,7 @@ static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci)
***************************************/
void isa_cirrus_vga_init(DisplayState *ds, uint8_t *vga_ram_base,
- unsigned long vga_ram_offset, int vga_ram_size)
+ ram_addr_t vga_ram_offset, int vga_ram_size)
{
CirrusVGAState *s;
@@ -3392,6 +3340,13 @@ static void cirrus_pci_lfb_map(PCIDevice *d, int region_num,
cpu_register_physical_memory(addr + 0x1000000, 0x400000,
s->cirrus_linear_bitblt_io_addr);
+
+ s->map_addr = s->map_end = 0;
+ s->lfb_addr = addr & TARGET_PAGE_MASK;
+ s->lfb_end = ((addr + VGA_RAM_SIZE) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
+ /* account for overflow */
+ if (s->lfb_end < addr + VGA_RAM_SIZE)
+ s->lfb_end = addr + VGA_RAM_SIZE;
}
static void cirrus_pci_mmio_map(PCIDevice *d, int region_num,
@@ -3404,7 +3359,7 @@ static void cirrus_pci_mmio_map(PCIDevice *d, int region_num,
}
void pci_cirrus_vga_init(PCIBus *bus, DisplayState *ds, uint8_t *vga_ram_base,
- unsigned long vga_ram_offset, int vga_ram_size)
+ ram_addr_t vga_ram_offset, int vga_ram_size)
{
PCICirrusVGAState *d;
uint8_t *pci_conf;
diff --git a/hw/esp.c b/hw/esp.c
index 6b16cf468..d28229c4a 100644
--- a/hw/esp.c
+++ b/hw/esp.c
@@ -44,8 +44,11 @@ do { printf("ESP: " fmt , ##args); } while (0)
#define DPRINTF(fmt, args...) do {} while (0)
#endif
+#define ESP_ERROR(fmt, args...) \
+do { printf("ESP ERROR: %s: " fmt, __func__ , ##args); } while (0)
+
#define ESP_REGS 16
-#define TI_BUFSZ 32
+#define TI_BUFSZ 16
typedef struct ESPState ESPState;
@@ -120,8 +123,8 @@ struct ESPState {
#define STAT_DI 0x01
#define STAT_CD 0x02
#define STAT_ST 0x03
-#define STAT_MI 0x06
-#define STAT_MO 0x07
+#define STAT_MO 0x06
+#define STAT_MI 0x07
#define STAT_PIO_MASK 0x06
#define STAT_TC 0x10
@@ -129,6 +132,8 @@ struct ESPState {
#define STAT_GE 0x40
#define STAT_INT 0x80
+#define BUSID_DID 0x07
+
#define INTR_FC 0x08
#define INTR_BS 0x10
#define INTR_DC 0x20
@@ -139,8 +144,6 @@ struct ESPState {
#define CFG1_RESREPT 0x40
-#define CFG2_MASK 0x15
-
#define TCHI_FAS100A 0x4
static void esp_raise_irq(ESPState *s)
@@ -164,16 +167,16 @@ static uint32_t get_cmd(ESPState *s, uint8_t *buf)
uint32_t dmalen;
int target;
- dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
- target = s->wregs[ESP_WBUSID] & 7;
- DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
+ target = s->wregs[ESP_WBUSID] & BUSID_DID;
if (s->dma) {
+ dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
s->dma_memory_read(s->dma_opaque, buf, dmalen);
} else {
+ dmalen = s->ti_size;
+ memcpy(buf, s->ti_buf, dmalen);
buf[0] = 0;
- memcpy(&buf[1], s->ti_buf, dmalen);
- dmalen++;
}
+ DPRINTF("get_cmd: len %d target %d\n", dmalen, target);
s->ti_size = 0;
s->ti_rptr = 0;
@@ -318,7 +321,7 @@ static void esp_do_dma(ESPState *s)
} else {
s->current_dev->read_data(s->current_dev, 0);
/* If there is still data to be read from the device then
- complete the DMA operation immeriately. Otherwise defer
+ complete the DMA operation immediately. Otherwise defer
until the scsi layer has completed. */
if (s->dma_left == 0 && s->ti_size > 0) {
esp_dma_done(s);
@@ -407,6 +410,8 @@ static void esp_reset(void *opaque)
s->ti_wptr = 0;
s->dma = 0;
s->do_cmd = 0;
+
+ s->rregs[ESP_CFG1] = 7;
}
static void parent_esp_reset(void *opaque, int irq, int level)
@@ -427,8 +432,8 @@ static uint32_t esp_mem_readb(void *opaque, target_phys_addr_t addr)
if (s->ti_size > 0) {
s->ti_size--;
if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
- /* Data in/out. */
- fprintf(stderr, "esp: PIO data read not implemented\n");
+ /* Data out. */
+ ESP_ERROR("PIO data read not implemented\n");
s->rregs[ESP_FIFO] = 0;
} else {
s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
@@ -467,11 +472,8 @@ static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
case ESP_FIFO:
if (s->do_cmd) {
s->cmdbuf[s->cmdlen++] = val & 0xff;
- } else if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
- uint8_t buf;
- buf = val & 0xff;
- s->ti_size--;
- fprintf(stderr, "esp: PIO data write not implemented\n");
+ } else if (s->ti_size == TI_BUFSZ - 1) {
+ ESP_ERROR("fifo overrun\n");
} else {
s->ti_size++;
s->ti_buf[s->ti_wptr++] = val & 0xff;
@@ -515,6 +517,8 @@ static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
case CMD_ICCS:
DPRINTF("Initiator Command Complete Sequence (%2.2x)\n", val);
write_response(s);
+ s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RSTAT] |= STAT_MI;
break;
case CMD_MSGACC:
DPRINTF("Message Accepted (%2.2x)\n", val);
@@ -535,9 +539,10 @@ static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
break;
case CMD_ENSEL:
DPRINTF("Enable selection (%2.2x)\n", val);
+ s->rregs[ESP_RINTR] = 0;
break;
default:
- DPRINTF("Unhandled ESP command (%2.2x)\n", val);
+ ESP_ERROR("Unhandled ESP command (%2.2x)\n", val);
break;
}
break;
@@ -548,14 +553,12 @@ static void esp_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
break;
case ESP_WCCF ... ESP_WTEST:
break;
- case ESP_CFG2:
- s->rregs[saddr] = val & CFG2_MASK;
- break;
- case ESP_CFG3 ... ESP_RES4:
+ case ESP_CFG2 ... ESP_RES4:
s->rregs[saddr] = val;
break;
default:
- break;
+ ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", val, saddr);
+ return;
}
s->wregs[saddr] = val;
}
@@ -620,6 +623,8 @@ void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id)
if (id < 0) {
for (id = 0; id < ESP_MAX_DEVS; id++) {
+ if (id == (s->rregs[ESP_CFG1] & 0x7))
+ continue;
if (s->scsi_dev[id] == NULL)
break;
}
diff --git a/hw/g364fb.c b/hw/g364fb.c
index be045bf9e..ed690ea80 100644
--- a/hw/g364fb.c
+++ b/hw/g364fb.c
@@ -72,7 +72,7 @@ typedef struct G364State {
static void g364fb_draw_graphic(G364State *s, int full_update)
{
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 8:
g364fb_draw_graphic8(s, full_update);
break;
@@ -86,7 +86,7 @@ static void g364fb_draw_graphic(G364State *s, int full_update)
g364fb_draw_graphic32(s, full_update);
break;
default:
- printf("g364fb: unknown depth %d\n", s->ds->depth);
+ printf("g364fb: unknown depth %d\n", ds_get_bits_per_pixel(s->ds));
return;
}
@@ -101,11 +101,11 @@ static void g364fb_draw_blank(G364State *s, int full_update)
if (!full_update)
return;
- w = s->scr_width * ((s->ds->depth + 7) >> 3);
- d = s->ds->data;
+ w = s->scr_width * ((ds_get_bits_per_pixel(s->ds) + 7) >> 3);
+ d = ds_get_data(s->ds);
for(i = 0; i < s->scr_height; i++) {
memset(d, 0, w);
- d += s->ds->linesize;
+ d += ds_get_linesize(s->ds);
}
dpy_update(s->ds, 0, 0, s->scr_width, s->scr_height);
@@ -131,7 +131,7 @@ static void g364fb_update_display(void *opaque)
s->graphic_mode = graphic_mode;
full_update = 1;
}
- if (s->scr_width != s->ds->width || s->scr_height != s->ds->height) {
+ if (s->scr_width != ds_get_width(s->ds) || s->scr_height != ds_get_height(s->ds)) {
qemu_console_resize(s->console, s->scr_width, s->scr_height);
full_update = 1;
}
diff --git a/hw/g364fb_template.h b/hw/g364fb_template.h
index 43755966e..2f10e9d03 100644
--- a/hw/g364fb_template.h
+++ b/hw/g364fb_template.h
@@ -28,7 +28,7 @@ static void glue(g364fb_draw_graphic, BPP)(G364State *s, int full_update)
data_buffer = s->vram_buffer;
w_display = s->scr_width * PIXEL_WIDTH / 8;
- data_display = s->ds->data;
+ data_display = ds_get_data(s->ds);
for(i = 0; i < s->scr_height; i++) {
dd = data_display;
for (j = 0; j < s->scr_width; j++, dd += PIXEL_WIDTH / 8, data_buffer++) {
@@ -38,6 +38,6 @@ static void glue(g364fb_draw_graphic, BPP)(G364State *s, int full_update)
s->palette[index][1],
s->palette[index][2]);
}
- data_display += s->ds->linesize;
+ data_display += ds_get_linesize(s->ds);
}
}
diff --git a/hw/ide.c b/hw/ide.c
index 6be5dcfbd..315576bb2 100644
--- a/hw/ide.c
+++ b/hw/ide.c
@@ -2695,10 +2695,10 @@ static void ide_init2(IDEState *ide_state,
s->bs = hd1;
if (s->bs) {
bdrv_get_geometry(s->bs, &nb_sectors);
- bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
- s->cylinders = cylinders;
- s->heads = heads;
- s->sectors = secs;
+ bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
+ s->cylinders = cylinders;
+ s->heads = heads;
+ s->sectors = secs;
s->nb_sectors = nb_sectors;
if (bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM) {
diff --git a/hw/jazz_led.c b/hw/jazz_led.c
index 8460bfcd5..c8ac26372 100644
--- a/hw/jazz_led.c
+++ b/hw/jazz_led.c
@@ -155,8 +155,8 @@ static void draw_horizontal_line(DisplayState *ds, int posy, int posx1, int posx
uint8_t *d;
int x, bpp;
- bpp = (ds->depth + 7) >> 3;
- d = ds->data + ds->linesize * posy + bpp * posx1;
+ bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;
+ d = ds_get_data(ds) + ds_get_linesize(ds) * posy + bpp * posx1;
switch(bpp) {
case 1:
for (x = posx1; x <= posx2; x++) {
@@ -184,25 +184,25 @@ static void draw_vertical_line(DisplayState *ds, int posx, int posy1, int posy2,
uint8_t *d;
int y, bpp;
- bpp = (ds->depth + 7) >> 3;
- d = ds->data + ds->linesize * posy1 + bpp * posx;
+ bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;
+ d = ds_get_data(ds) + ds_get_linesize(ds) * posy1 + bpp * posx;
switch(bpp) {
case 1:
for (y = posy1; y <= posy2; y++) {
*((uint8_t *)d) = color;
- d += ds->linesize;
+ d += ds_get_linesize(ds);
}
break;
case 2:
for (y = posy1; y <= posy2; y++) {
*((uint16_t *)d) = color;
- d += ds->linesize;
+ d += ds_get_linesize(ds);
}
break;
case 4:
for (y = posy1; y <= posy2; y++) {
*((uint32_t *)d) = color;
- d += ds->linesize;
+ d += ds_get_linesize(ds);
}
break;
}
@@ -218,17 +218,17 @@ static void jazz_led_update_display(void *opaque)
if (s->state & REDRAW_BACKGROUND) {
/* clear screen */
- bpp = (ds->depth + 7) >> 3;
- d1 = ds->data;
- for (y = 0; y < ds->height; y++) {
- memset(d1, 0x00, ds->width * bpp);
- d1 += ds->linesize;
+ bpp = (ds_get_bits_per_pixel(ds) + 7) >> 3;
+ d1 = ds_get_data(ds);
+ for (y = 0; y < ds_get_height(ds); y++) {
+ memset(d1, 0x00, ds_get_width(ds) * bpp);
+ d1 += ds_get_linesize(ds);
}
}
if (s->state & REDRAW_SEGMENTS) {
/* set colors according to bpp */
- switch (ds->depth) {
+ switch (ds_get_bits_per_pixel(ds)) {
case 8:
color_segment = rgb_to_pixel8(0xaa, 0xaa, 0xaa);
color_led = rgb_to_pixel8(0x00, 0xff, 0x00);
@@ -272,7 +272,7 @@ static void jazz_led_update_display(void *opaque)
}
s->state = REDRAW_NONE;
- dpy_update(ds, 0, 0, ds->width, ds->height);
+ dpy_update(ds, 0, 0, ds_get_width(ds), ds_get_height(ds));
}
static void jazz_led_invalidate_display(void *opaque)
diff --git a/hw/musicpal.c b/hw/musicpal.c
index c7c11dea4..313b1341c 100644
--- a/hw/musicpal.c
+++ b/hw/musicpal.c
@@ -801,7 +801,7 @@ static inline void glue(set_lcd_pixel, depth) \
(musicpal_lcd_state *s, int x, int y, type col) \
{ \
int dx, dy; \
- type *pixel = &((type *) s->ds->data)[(y * 128 * 3 + x) * 3]; \
+ type *pixel = &((type *) ds_get_data(s->ds))[(y * 128 * 3 + x) * 3]; \
\
for (dy = 0; dy < 3; dy++, pixel += 127 * 3) \
for (dx = 0; dx < 3; dx++, pixel++) \
@@ -818,7 +818,7 @@ static void lcd_refresh(void *opaque)
musicpal_lcd_state *s = opaque;
int x, y, col;
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 0:
return;
#define LCD_REFRESH(depth, func) \
@@ -838,7 +838,7 @@ static void lcd_refresh(void *opaque)
LCD_REFRESH(32, (s->ds->bgr ? rgb_to_pixel32bgr : rgb_to_pixel32))
default:
cpu_abort(cpu_single_env, "unsupported colour depth %i\n",
- s->ds->depth);
+ ds_get_bits_per_pixel(s->ds));
}
dpy_update(s->ds, 0, 0, 128*3, 64*3);
diff --git a/hw/omap_lcdc.c b/hw/omap_lcdc.c
index 99e8b4fa4..66cc8dfe2 100644
--- a/hw/omap_lcdc.c
+++ b/hw/omap_lcdc.c
@@ -125,7 +125,7 @@ static void omap_update_display(void *opaque)
uint8_t *s, *d;
if (!omap_lcd || omap_lcd->plm == 1 ||
- !omap_lcd->enable || !omap_lcd->state->depth)
+ !omap_lcd->enable || !ds_get_bits_per_pixel(omap_lcd->state))
return;
frame_offset = 0;
@@ -145,25 +145,25 @@ static void omap_update_display(void *opaque)
/* Colour depth */
switch ((omap_lcd->palette[0] >> 12) & 7) {
case 1:
- draw_line = draw_line_table2[omap_lcd->state->depth];
+ draw_line = draw_line_table2[ds_get_bits_per_pixel(omap_lcd->state)];
bpp = 2;
break;
case 2:
- draw_line = draw_line_table4[omap_lcd->state->depth];
+ draw_line = draw_line_table4[ds_get_bits_per_pixel(omap_lcd->state)];
bpp = 4;
break;
case 3:
- draw_line = draw_line_table8[omap_lcd->state->depth];
+ draw_line = draw_line_table8[ds_get_bits_per_pixel(omap_lcd->state)];
bpp = 8;
break;
case 4 ... 7:
if (!omap_lcd->tft)
- draw_line = draw_line_table12[omap_lcd->state->depth];
+ draw_line = draw_line_table12[ds_get_bits_per_pixel(omap_lcd->state)];
else
- draw_line = draw_line_table16[omap_lcd->state->depth];
+ draw_line = draw_line_table16[ds_get_bits_per_pixel(omap_lcd->state)];
bpp = 16;
break;
@@ -174,8 +174,8 @@ static void omap_update_display(void *opaque)
/* Resolution */
width = omap_lcd->width;
- if (width != omap_lcd->state->width ||
- omap_lcd->height != omap_lcd->state->height) {
+ if (width != ds_get_width(omap_lcd->state) ||
+ omap_lcd->height != ds_get_height(omap_lcd->state)) {
qemu_console_resize(omap_lcd->console,
omap_lcd->width, omap_lcd->height);
omap_lcd->invalidate = 1;
@@ -202,7 +202,7 @@ static void omap_update_display(void *opaque)
if (omap_lcd->dma->dual)
omap_lcd->dma->current_frame ^= 1;
- if (!omap_lcd->state->depth)
+ if (!ds_get_bits_per_pixel(omap_lcd->state))
return;
line = 0;
@@ -217,8 +217,8 @@ static void omap_update_display(void *opaque)
step = width * bpp >> 3;
scanline = frame_base + step * line;
s = (uint8_t *) (phys_ram_base + scanline);
- d = omap_lcd->state->data;
- linesize = omap_lcd->state->linesize;
+ d = ds_get_data(omap_lcd->state);
+ linesize = ds_get_linesize(omap_lcd->state);
dirty[0] = dirty[1] =
cpu_physical_memory_get_dirty(scanline, VGA_DIRTY_FLAG);
@@ -293,10 +293,10 @@ static int ppm_save(const char *filename, uint8_t *data,
static void omap_screen_dump(void *opaque, const char *filename) {
struct omap_lcd_panel_s *omap_lcd = opaque;
omap_update_display(opaque);
- if (omap_lcd && omap_lcd->state->data)
- ppm_save(filename, omap_lcd->state->data,
+ if (omap_lcd && ds_get_data(omap_lcd->state))
+ ppm_save(filename, ds_get_data(omap_lcd->state),
omap_lcd->width, omap_lcd->height,
- omap_lcd->state->linesize);
+ ds_get_linesize(omap_lcd->state));
}
static void omap_invalidate_display(void *opaque) {
diff --git a/hw/pc.h b/hw/pc.h
index 074195d86..5349a4196 100644
--- a/hw/pc.h
+++ b/hw/pc.h
@@ -146,9 +146,9 @@ int isa_vga_mm_init(DisplayState *ds, uint8_t *vga_ram_base,
/* cirrus_vga.c */
void pci_cirrus_vga_init(PCIBus *bus, DisplayState *ds, uint8_t *vga_ram_base,
- unsigned long vga_ram_offset, int vga_ram_size);
+ ram_addr_t vga_ram_offset, int vga_ram_size);
void isa_cirrus_vga_init(DisplayState *ds, uint8_t *vga_ram_base,
- unsigned long vga_ram_offset, int vga_ram_size);
+ ram_addr_t vga_ram_offset, int vga_ram_size);
/* ide.c */
void isa_ide_init(int iobase, int iobase2, qemu_irq irq,
diff --git a/hw/pl110.c b/hw/pl110.c
index 2437185f6..06541aece 100644
--- a/hw/pl110.c
+++ b/hw/pl110.c
@@ -124,7 +124,7 @@ static void pl110_update_display(void *opaque)
if (!pl110_enabled(s))
return;
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 0:
return;
case 8:
@@ -190,7 +190,7 @@ static void pl110_update_display(void *opaque)
if (base > 0x80000000)
base -= 0x80000000;
src = phys_ram_base + base;
- dest = s->ds->data;
+ dest = ds_get_data(s->ds);
first = -1;
addr = base;
@@ -249,7 +249,7 @@ static void pl110_update_pallette(pl110_state *s, int n)
b = (raw & 0x1f) << 3;
/* The I bit is ignored. */
raw >>= 6;
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 8:
s->pallette[n] = rgb_to_pixel8(r, g, b);
break;
diff --git a/hw/pxa2xx_lcd.c b/hw/pxa2xx_lcd.c
index 5e834fe76..715b2f1a4 100644
--- a/hw/pxa2xx_lcd.c
+++ b/hw/pxa2xx_lcd.c
@@ -650,7 +650,7 @@ static void pxa2xx_palette_parse(struct pxa2xx_lcdc_s *s, int ch, int bpp)
}
break;
}
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 8:
*dest = rgb_to_pixel8(r, g, b) | alpha;
break;
@@ -693,7 +693,7 @@ static void pxa2xx_lcdc_dma0_redraw_horiz(struct pxa2xx_lcdc_s *s,
else if (s->bpp > pxa_lcdc_8bpp)
src_width *= 2;
- dest = s->ds->data;
+ dest = ds_get_data(s->ds);
dest_width = s->xres * s->dest_width;
addr = (ram_addr_t) (fb - phys_ram_base);
@@ -750,7 +750,7 @@ static void pxa2xx_lcdc_dma0_redraw_vert(struct pxa2xx_lcdc_s *s,
src_width *= 2;
dest_width = s->yres * s->dest_width;
- dest = s->ds->data + dest_width * (s->xres - 1);
+ dest = ds_get_data(s->ds) + dest_width * (s->xres - 1);
addr = (ram_addr_t) (fb - phys_ram_base);
start = addr + s->yres * src_width;
@@ -1006,7 +1006,7 @@ struct pxa2xx_lcdc_s *pxa2xx_lcdc_init(target_phys_addr_t base, qemu_irq irq,
pxa2xx_invalidate_display,
pxa2xx_screen_dump, NULL, s);
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 0:
s->dest_width = 0;
break;
diff --git a/hw/ssd0303.c b/hw/ssd0303.c
index b0b099f0d..d10371952 100644
--- a/hw/ssd0303.c
+++ b/hw/ssd0303.c
@@ -206,7 +206,7 @@ static void ssd0303_update_display(void *opaque)
if (!s->redraw)
return;
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 0:
return;
case 15:
@@ -238,7 +238,7 @@ static void ssd0303_update_display(void *opaque)
colors[0] = colortab + dest_width;
colors[1] = colortab;
}
- dest = s->ds->data;
+ dest = ds_get_data(s->ds);
for (y = 0; y < 16; y++) {
line = (y + s->start_line) & 63;
src = s->framebuffer + 132 * (line >> 3) + 36;
diff --git a/hw/ssd0323.c b/hw/ssd0323.c
index e496fe784..29cd52c96 100644
--- a/hw/ssd0323.c
+++ b/hw/ssd0323.c
@@ -187,7 +187,7 @@ static void ssd0323_update_display(void *opaque)
if (!s->redraw)
return;
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 0:
return;
case 15:
@@ -210,7 +210,7 @@ static void ssd0323_update_display(void *opaque)
for (i = 0; i < 16; i++) {
int n;
colors[i] = p;
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 15:
n = i * 2 + (i >> 3);
p[0] = n | (n << 5);
@@ -233,7 +233,7 @@ static void ssd0323_update_display(void *opaque)
p += dest_width;
}
/* TODO: Implement row/column remapping. */
- dest = s->ds->data;
+ dest = ds_get_data(s->ds);
for (y = 0; y < 64; y++) {
line = y;
src = s->framebuffer + 64 * line;
diff --git a/hw/sun4m.c b/hw/sun4m.c
index 4069cd3c4..2035b6c92 100644
--- a/hw/sun4m.c
+++ b/hw/sun4m.c
@@ -1589,7 +1589,7 @@ static void sun4c_hw_init(const struct sun4c_hwdef *hwdef, ram_addr_t RAM_size,
if (hwdef->fd_base != (target_phys_addr_t)-1) {
/* there is zero or one floppy drive */
- fd[1] = fd[0] = NULL;
+ memset(fd, 0, sizeof(fd));
drive_index = drive_get_index(IF_FLOPPY, 0, 0);
if (drive_index != -1)
fd[0] = drives_table[drive_index].bdrv;
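[editor's note] Using memset rather than a pair of explicit NULL stores keeps the initialisation correct if the drive array ever changes size. A tiny illustration, assuming the array is declared as BlockDriverState *fd[MAX_FD] as elsewhere in the tree:

    BlockDriverState *fd[MAX_FD];

    fd[1] = fd[0] = NULL;        /* covers exactly two entries           */
    memset(fd, 0, sizeof(fd));   /* covers MAX_FD entries, whatever it is */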
diff --git a/hw/tc6393xb.c b/hw/tc6393xb.c
index 1fe57bd76..3af791fab 100644
--- a/hw/tc6393xb.c
+++ b/hw/tc6393xb.c
@@ -430,7 +430,7 @@ static void tc6393xb_nand_writeb(struct tc6393xb_s *s, target_phys_addr_t addr,
static void tc6393xb_draw_graphic(struct tc6393xb_s *s, int full_update)
{
- switch (s->ds->depth) {
+ switch (ds_get_bits_per_pixel(s->ds)) {
case 8:
tc6393xb_draw_graphic8(s);
break;
@@ -447,7 +447,7 @@ static void tc6393xb_draw_graphic(struct tc6393xb_s *s, int full_update)
tc6393xb_draw_graphic32(s);
break;
default:
- printf("tc6393xb: unknown depth %d\n", s->ds->depth);
+ printf("tc6393xb: unknown depth %d\n", ds_get_bits_per_pixel(s->ds));
return;
}
@@ -462,11 +462,11 @@ static void tc6393xb_draw_blank(struct tc6393xb_s *s, int full_update)
if (!full_update)
return;
- w = s->scr_width * ((s->ds->depth + 7) >> 3);
- d = s->ds->data;
+ w = s->scr_width * ((ds_get_bits_per_pixel(s->ds) + 7) >> 3);
+ d = ds_get_data(s->ds);
for(i = 0; i < s->scr_height; i++) {
memset(d, 0, w);
- d += s->ds->linesize;
+ d += ds_get_linesize(s->ds);
}
dpy_update(s->ds, 0, 0, s->scr_width, s->scr_height);
@@ -485,7 +485,7 @@ static void tc6393xb_update_display(void *opaque)
s->blanked = s->blank;
full_update = 1;
}
- if (s->scr_width != s->ds->width || s->scr_height != s->ds->height) {
+ if (s->scr_width != ds_get_width(s->ds) || s->scr_height != ds_get_height(s->ds)) {
qemu_console_resize(s->console, s->scr_width, s->scr_height);
full_update = 1;
}
diff --git a/hw/tc6393xb_template.h b/hw/tc6393xb_template.h
index e882c98a0..587382eec 100644
--- a/hw/tc6393xb_template.h
+++ b/hw/tc6393xb_template.h
@@ -46,12 +46,12 @@ static void glue(tc6393xb_draw_graphic, BITS)(struct tc6393xb_s *s)
data_buffer = (uint16_t*)(phys_ram_base + s->vram_addr);
w_display = s->scr_width * BITS / 8;
- data_display = s->ds->data;
+ data_display = ds_get_data(s->ds);
for(i = 0; i < s->scr_height; i++) {
#if (BITS == 16)
memcpy(data_display, data_buffer, s->scr_width * 2);
data_buffer += s->scr_width;
- data_display += s->ds->linesize;
+ data_display += ds_get_linesize(s->ds);
#else
int j;
for (j = 0; j < s->scr_width; j++, data_display += BITS / 8, data_buffer++) {
diff --git a/hw/tcx.c b/hw/tcx.c
index 260635a5f..de4fda0b4 100644
--- a/hw/tcx.c
+++ b/hw/tcx.c
@@ -55,7 +55,7 @@ static void update_palette_entries(TCXState *s, int start, int end)
{
int i;
for(i = start; i < end; i++) {
- switch(s->ds->depth) {
+ switch(ds_get_bits_per_pixel(s->ds)) {
default:
case 8:
s->palette[i] = rgb_to_pixel8(s->r[i], s->g[i], s->b[i]);
@@ -200,18 +200,18 @@ static void tcx_update_display(void *opaque)
uint8_t *d, *s;
void (*f)(TCXState *s1, uint8_t *dst, const uint8_t *src, int width);
- if (ts->ds->depth == 0)
+ if (ds_get_bits_per_pixel(ts->ds) == 0)
return;
page = ts->vram_offset;
y_start = -1;
page_min = 0xffffffff;
page_max = 0;
- d = ts->ds->data;
+ d = ds_get_data(ts->ds);
s = ts->vram;
- dd = ts->ds->linesize;
+ dd = ds_get_linesize(ts->ds);
ds = 1024;
- switch (ts->ds->depth) {
+ switch (ds_get_bits_per_pixel(ts->ds)) {
case 32:
f = tcx_draw_line32;
break;
@@ -278,7 +278,7 @@ static void tcx24_update_display(void *opaque)
uint8_t *d, *s;
uint32_t *cptr, *s24;
- if (ts->ds->depth != 32)
+ if (ds_get_bits_per_pixel(ts->ds) != 32)
return;
page = ts->vram_offset;
page24 = ts->vram24_offset;
@@ -286,11 +286,11 @@ static void tcx24_update_display(void *opaque)
y_start = -1;
page_min = 0xffffffff;
page_max = 0;
- d = ts->ds->data;
+ d = ds_get_data(ts->ds);
s = ts->vram;
s24 = ts->vram24;
cptr = ts->cplane;
- dd = ts->ds->linesize;
+ dd = ds_get_linesize(ts->ds);
ds = 1024;
for(y = 0; y < ts->height; y += 4, page += TARGET_PAGE_SIZE,
diff --git a/hw/vga.c b/hw/vga.c
index c71638855..173812cdd 100644
--- a/hw/vga.c
+++ b/hw/vga.c
@@ -28,10 +28,9 @@
#include "vga_int.h"
#include "pixel_ops.h"
#include "qemu-timer.h"
+#include "kvm.h"
#include "qemu-kvm.h"
-#include <sys/mman.h>
-
//#define DEBUG_VGA
//#define DEBUG_VGA_MEM
//#define DEBUG_VGA_REG
@@ -1154,7 +1153,7 @@ static int update_basic_params(VGAState *s)
static inline int get_depth_index(DisplayState *s)
{
- switch(s->depth) {
+ switch(ds_get_bits_per_pixel(s)) {
default:
case 8:
return 0;
@@ -1246,6 +1245,8 @@ static void vga_draw_text(VGAState *s, int full_update)
vga_draw_glyph8_func *vga_draw_glyph8;
vga_draw_glyph9_func *vga_draw_glyph9;
+ vga_dirty_log_stop(s);
+
full_update |= update_palette16(s);
palette = s->last_palette;
@@ -1282,7 +1283,7 @@ static void vga_draw_text(VGAState *s, int full_update)
cw = 9;
if (s->sr[1] & 0x08)
cw = 16; /* NOTE: no 18 pixel wide */
- x_incr = cw * ((s->ds->depth + 7) >> 3);
+ x_incr = cw * ((ds_get_bits_per_pixel(s->ds) + 7) >> 3);
width = (s->cr[0x01] + 1);
if (s->cr[0x06] == 100) {
/* ugly hack for CGA 160x100x16 - explain me the logic */
@@ -1332,8 +1333,8 @@ static void vga_draw_text(VGAState *s, int full_update)
vga_draw_glyph8 = vga_draw_glyph8_table[depth_index];
vga_draw_glyph9 = vga_draw_glyph9_table[depth_index];
- dest = s->ds->data;
- linesize = s->ds->linesize;
+ dest = ds_get_data(s->ds);
+ linesize = ds_get_linesize(s->ds);
ch_attr_ptr = s->last_ch_attr;
for(cy = 0; cy < height; cy++) {
d1 = dest;
@@ -1559,16 +1560,18 @@ void vga_invalidate_scanlines(VGAState *s, int y1, int y2)
}
}
-static int bitmap_get_dirty(unsigned long *bitmap, unsigned nr)
+static void vga_sync_dirty_bitmap(VGAState *s)
{
- unsigned word = nr / ((sizeof bitmap[0]) * 8);
- unsigned bit = nr % ((sizeof bitmap[0]) * 8);
+ if (s->map_addr)
+ cpu_physical_sync_dirty_bitmap(s->map_addr, s->map_end);
- //printf("%x -> %ld\n", nr, (bitmap[word] >> bit) & 1);
- return (bitmap[word] >> bit) & 1;
+ if (s->lfb_vram_mapped) {
+ cpu_physical_sync_dirty_bitmap(isa_mem_base + 0xa0000, 0xa8000);
+ cpu_physical_sync_dirty_bitmap(isa_mem_base + 0xa8000, 0xb0000);
+ }
+ vga_dirty_log_start(s);
}
-
/*
* graphic modes
*/
@@ -1581,23 +1584,12 @@ static void vga_draw_graphic(VGAState *s, int full_update)
uint32_t v, addr1, addr;
long page0, page1, page_min, page_max;
vga_draw_line_func *vga_draw_line;
- /* HACK ALERT */
-#define VGA_BITMAP_SIZE (VGA_RAM_SIZE / 4096 / 8 / sizeof(long))
- unsigned long bitmap[VGA_BITMAP_SIZE];
-#ifndef TARGET_IA64
- int r;
- if (kvm_enabled()) {
- r = qemu_kvm_get_dirty_pages(s->map_addr, &bitmap);
- if (r < 0)
- fprintf(stderr, "kvm: get_dirty_pages returned %d\n", r);
- }
-#else
- memset(bitmap, 0xff, VGA_BITMAP_SIZE*sizeof(long));
- //FIXME:Always flush full screen before log dirty ready!!
-#endif
full_update |= update_basic_params(s);
+ if (!full_update)
+ vga_sync_dirty_bitmap(s);
+
s->get_resolution(s, &width, &height);
disp_width = width;
@@ -1691,8 +1683,8 @@ static void vga_draw_graphic(VGAState *s, int full_update)
y_start = -1;
page_min = 0x7fffffff;
page_max = -1;
- d = s->ds->data;
- linesize = s->ds->linesize;
+ d = ds_get_data(s->ds);
+ linesize = ds_get_linesize(s->ds);
y1 = 0;
for(y = 0; y < height; y++) {
addr = addr1;
@@ -1710,17 +1702,11 @@ static void vga_draw_graphic(VGAState *s, int full_update)
update = full_update |
cpu_physical_memory_get_dirty(page0, VGA_DIRTY_FLAG) |
cpu_physical_memory_get_dirty(page1, VGA_DIRTY_FLAG);
- if (kvm_enabled()) {
- update |= bitmap_get_dirty(bitmap, (page0 - s->vram_offset) >> TARGET_PAGE_BITS);
- update |= bitmap_get_dirty(bitmap, (page1 - s->vram_offset) >> TARGET_PAGE_BITS);
- }
if ((page1 - page0) > TARGET_PAGE_SIZE) {
/* if wide line, can use another page */
update |= cpu_physical_memory_get_dirty(page0 + TARGET_PAGE_SIZE,
VGA_DIRTY_FLAG);
- if (kvm_enabled())
- update |= bitmap_get_dirty(bitmap, (page0 + TARGET_PAGE_SIZE - s->vram_offset) >> TARGET_PAGE_BITS);
}
/* explicit invalidation for the hardware cursor */
update |= (s->invalidated_y_table[y >> 5] >> (y & 0x1f)) & 1;
@@ -1778,15 +1764,17 @@ static void vga_draw_blank(VGAState *s, int full_update)
return;
if (s->last_scr_width <= 0 || s->last_scr_height <= 0)
return;
- if (s->ds->depth == 8)
+ vga_dirty_log_stop(s);
+
+ if (ds_get_bits_per_pixel(s->ds) == 8)
val = s->rgb_to_pixel(0, 0, 0);
else
val = 0;
- w = s->last_scr_width * ((s->ds->depth + 7) >> 3);
- d = s->ds->data;
+ w = s->last_scr_width * ((ds_get_bits_per_pixel(s->ds) + 7) >> 3);
+ d = ds_get_data(s->ds);
for(i = 0; i < s->last_scr_height; i++) {
memset(d, val, w);
- d += s->ds->linesize;
+ d += ds_get_linesize(s->ds);
}
dpy_update(s->ds, 0, 0,
s->last_scr_width, s->last_scr_height);
@@ -1801,7 +1789,7 @@ static void vga_update_display(void *opaque)
VGAState *s = (VGAState *)opaque;
int full_update, graphic_mode;
- if (s->ds->depth == 0) {
+ if (ds_get_bits_per_pixel(s->ds) == 0) {
/* nothing to do */
} else {
s->rgb_to_pixel =
@@ -2127,6 +2115,36 @@ typedef struct PCIVGAState {
VGAState vga_state;
} PCIVGAState;
+static int s1, s2;
+
+void vga_dirty_log_start(VGAState *s)
+{
+ if (kvm_enabled() && s->map_addr)
+ if (!s1) {
+ kvm_log_start(s->map_addr, s->map_end - s->map_addr);
+ s1 = 1;
+ }
+ if (kvm_enabled() && s->lfb_vram_mapped) {
+ if (!s2) {
+ kvm_log_start(isa_mem_base + 0xa0000, 0x8000);
+ kvm_log_start(isa_mem_base + 0xa8000, 0x8000);
+ }
+ s2 = 1;
+ }
+}
+
+void vga_dirty_log_stop(VGAState *s)
+{
+ if (kvm_enabled() && s->map_addr && s1)
+ kvm_log_stop(s->map_addr, s->map_end - s->map_addr);
+
+ if (kvm_enabled() && s->lfb_vram_mapped && s2) {
+ kvm_log_stop(isa_mem_base + 0xa0000, 0x8000);
+ kvm_log_stop(isa_mem_base + 0xa8000, 0x8000);
+ }
+ s1 = s2 = 0;
+}
+
static void vga_map(PCIDevice *pci_dev, int region_num,
uint32_t addr, uint32_t size, int type)
{
@@ -2136,11 +2154,12 @@ static void vga_map(PCIDevice *pci_dev, int region_num,
cpu_register_physical_memory(addr, s->bios_size, s->bios_offset);
} else {
cpu_register_physical_memory(addr, s->vram_size, s->vram_offset);
- if (kvm_enabled()) {
- kvm_qemu_log_memory(addr, VGA_RAM_SIZE, 1);
- s->map_addr = addr;
- }
}
+
+ s->map_addr = addr;
+ s->map_end = addr + VGA_RAM_SIZE;
+
+ vga_dirty_log_start(s);
}
#ifdef TARGET_IA64
@@ -2277,7 +2296,7 @@ void vga_bios_init(VGAState *s)
/* when used on xen/kvm environment, the vga_ram_base is not used */
void vga_common_init(VGAState *s, DisplayState *ds, uint8_t *vga_ram_base,
- unsigned long vga_ram_offset, int vga_ram_size)
+ ram_addr_t vga_ram_offset, int vga_ram_size)
{
int i, j, v, b;
@@ -2658,10 +2677,10 @@ static void vga_screen_dump(void *opaque, const char *filename)
s->graphic_mode = -1;
vga_update_display(s);
- if (ds->data) {
- ppm_save(filename, ds->data, vga_save_w, vga_save_h,
- s->ds->linesize);
- qemu_free(ds->data);
+ if (ds_get_data(ds)) {
+ ppm_save(filename, ds_get_data(ds), vga_save_w, vga_save_h,
+ ds_get_linesize(s->ds));
+ qemu_free(ds_get_data(ds));
}
*s->ds = saved_ds;
}
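[editor's note] Taken together, the vga.c changes replace the old one-shot qemu_kvm_get_dirty_pages() bitmap with a start/sync protocol: dirty-page logging is armed when the LFB is mapped, vga_sync_dirty_bitmap() folds the kernel's bits into qemu's dirty bitmap before each partial redraw, and logging is stopped again for text and blank modes. A rough sketch of how a device model might drive the same API; FBState, fb_phys, fb_ram_offset, fb_size and redraw_page() are illustrative names, not taken from this patch:

    static void fb_map(FBState *s, target_phys_addr_t addr)
    {
        s->fb_phys = addr;
        kvm_log_start(addr, s->fb_size);     /* ask KVM to track guest writes */
    }

    static void fb_update(FBState *s)
    {
        ram_addr_t page;

        /* fold KVM's write-tracking bits into qemu's dirty bitmap */
        cpu_physical_sync_dirty_bitmap(s->fb_phys, s->fb_phys + s->fb_size);

        for (page = s->fb_ram_offset; page < s->fb_ram_offset + s->fb_size;
             page += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) {
                redraw_page(s, page - s->fb_ram_offset);
                cpu_physical_memory_reset_dirty(page, page + TARGET_PAGE_SIZE,
                                                VGA_DIRTY_FLAG);
            }
        }
    }

The key point mirrored in vga.c is that cpu_physical_sync_dirty_bitmap() works on guest-physical addresses (map_addr) while cpu_physical_memory_get_dirty() is indexed by qemu RAM offsets (vram_offset).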
diff --git a/hw/vga_int.h b/hw/vga_int.h
index 7d1ec031b..cd9c92121 100644
--- a/hw/vga_int.h
+++ b/hw/vga_int.h
@@ -100,8 +100,13 @@ typedef void (* vga_update_retrace_info_fn)(struct VGAState *s);
#define VGA_STATE_COMMON \
uint8_t *vram_ptr; \
- unsigned long vram_offset; \
+ ram_addr_t vram_offset; \
unsigned int vram_size; \
+ uint32_t lfb_addr; \
+ uint32_t lfb_end; \
+ uint32_t map_addr; \
+ uint32_t map_end; \
+ uint32_t lfb_vram_mapped; /* whether 0xa0000 is mapped as ram */ \
unsigned long bios_offset; \
unsigned int bios_size; \
target_phys_addr_t base_ctrl; \
@@ -129,6 +134,7 @@ typedef void (* vga_update_retrace_info_fn)(struct VGAState *s);
int dac_8bit; \
uint8_t palette[768]; \
int32_t bank_offset; \
+ int vga_io_memory; \
int (*get_bpp)(struct VGAState *s); \
void (*get_offsets)(struct VGAState *s, \
uint32_t *pline_offset, \
@@ -168,8 +174,6 @@ typedef void (* vga_update_retrace_info_fn)(struct VGAState *s);
uint32_t last_palette[256]; \
uint32_t last_ch_attr[CH_ATTR_SIZE]; /* XXX: make it dynamic */ \
/* kvm */ \
- unsigned long map_addr; \
- unsigned long map_end; \
int32_t aliases_enabled; \
int32_t pad1; \
uint32_t aliased_bank_base[2]; \
@@ -193,8 +197,12 @@ static inline int c6_to_8(int v)
}
void vga_common_init(VGAState *s, DisplayState *ds, uint8_t *vga_ram_base,
- unsigned long vga_ram_offset, int vga_ram_size);
+ ram_addr_t vga_ram_offset, int vga_ram_size);
void vga_init(VGAState *s);
+
+void vga_dirty_log_start(VGAState *s);
+void vga_dirty_log_stop(VGAState *s);
+
uint32_t vga_mem_readb(void *opaque, target_phys_addr_t addr);
void vga_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val);
void vga_invalidate_scanlines(VGAState *s, int y1, int y2);
diff --git a/hw/vmware_vga.c b/hw/vmware_vga.c
index 625cd8357..2b7c7d3d1 100644
--- a/hw/vmware_vga.c
+++ b/hw/vmware_vga.c
@@ -319,7 +319,7 @@ static inline void vmsvga_update_rect(struct vmsvga_state_s *s,
width = s->bypp * w;
start = s->bypp * x + bypl * y;
src = s->vram + start;
- dst = s->ds->data + start;
+ dst = ds_get_data(s->ds) + start;
for (; line > 0; line --, src += bypl, dst += bypl)
memcpy(dst, src, width);
@@ -331,7 +331,7 @@ static inline void vmsvga_update_rect(struct vmsvga_state_s *s,
static inline void vmsvga_update_screen(struct vmsvga_state_s *s)
{
#ifndef DIRECT_VRAM
- memcpy(s->ds->data, s->vram, s->bypp * s->width * s->height);
+ memcpy(ds_get_data(s->ds), s->vram, s->bypp * s->width * s->height);
#endif
dpy_update(s->ds, 0, 0, s->width, s->height);
@@ -373,7 +373,7 @@ static inline void vmsvga_copy_rect(struct vmsvga_state_s *s,
int x0, int y0, int x1, int y1, int w, int h)
{
# ifdef DIRECT_VRAM
- uint8_t *vram = s->ds->data;
+ uint8_t *vram = ds_get_data(s->ds);
# else
uint8_t *vram = s->vram;
# endif
@@ -410,7 +410,7 @@ static inline void vmsvga_fill_rect(struct vmsvga_state_s *s,
uint32_t c, int x, int y, int w, int h)
{
# ifdef DIRECT_VRAM
- uint8_t *vram = s->ds->data;
+ uint8_t *vram = ds_get_data(s->ds);
# else
uint8_t *vram = s->vram;
# endif
@@ -915,7 +915,7 @@ static void vmsvga_reset(struct vmsvga_state_s *s)
s->width = -1;
s->height = -1;
s->svgaid = SVGA_ID;
- s->depth = s->ds->depth ? s->ds->depth : 24;
+ s->depth = ds_get_bits_per_pixel(s->ds) ? ds_get_bits_per_pixel(s->ds) : 24;
s->bypp = (s->depth + 7) >> 3;
s->cursor.on = 0;
s->redraw_fifo_first = 0;
@@ -976,7 +976,7 @@ static void vmsvga_screen_dump(void *opaque, const char *filename)
}
if (s->depth == 32) {
- ppm_save(filename, s->vram, s->width, s->height, s->ds->linesize);
+ ppm_save(filename, s->vram, s->width, s->height, ds_get_linesize(s->ds));
}
}
@@ -994,7 +994,7 @@ static uint32_t vmsvga_vram_readb(void *opaque, target_phys_addr_t addr)
struct vmsvga_state_s *s = (struct vmsvga_state_s *) opaque;
addr -= s->vram_base;
if (addr < s->fb_size)
- return *(uint8_t *) (s->ds->data + addr);
+ return *(uint8_t *) (ds_get_data(s->ds) + addr);
else
return *(uint8_t *) (s->vram + addr);
}
@@ -1004,7 +1004,7 @@ static uint32_t vmsvga_vram_readw(void *opaque, target_phys_addr_t addr)
struct vmsvga_state_s *s = (struct vmsvga_state_s *) opaque;
addr -= s->vram_base;
if (addr < s->fb_size)
- return *(uint16_t *) (s->ds->data + addr);
+ return *(uint16_t *) (ds_get_data(s->ds) + addr);
else
return *(uint16_t *) (s->vram + addr);
}
@@ -1014,7 +1014,7 @@ static uint32_t vmsvga_vram_readl(void *opaque, target_phys_addr_t addr)
struct vmsvga_state_s *s = (struct vmsvga_state_s *) opaque;
addr -= s->vram_base;
if (addr < s->fb_size)
- return *(uint32_t *) (s->ds->data + addr);
+ return *(uint32_t *) (ds_get_data(s->ds) + addr);
else
return *(uint32_t *) (s->vram + addr);
}
@@ -1025,7 +1025,7 @@ static void vmsvga_vram_writeb(void *opaque, target_phys_addr_t addr,
struct vmsvga_state_s *s = (struct vmsvga_state_s *) opaque;
addr -= s->vram_base;
if (addr < s->fb_size)
- *(uint8_t *) (s->ds->data + addr) = value;
+ *(uint8_t *) (ds_get_data(s->ds) + addr) = value;
else
*(uint8_t *) (s->vram + addr) = value;
}
@@ -1036,7 +1036,7 @@ static void vmsvga_vram_writew(void *opaque, target_phys_addr_t addr,
struct vmsvga_state_s *s = (struct vmsvga_state_s *) opaque;
addr -= s->vram_base;
if (addr < s->fb_size)
- *(uint16_t *) (s->ds->data + addr) = value;
+ *(uint16_t *) (ds_get_data(s->ds) + addr) = value;
else
*(uint16_t *) (s->vram + addr) = value;
}
@@ -1047,7 +1047,7 @@ static void vmsvga_vram_writel(void *opaque, target_phys_addr_t addr,
struct vmsvga_state_s *s = (struct vmsvga_state_s *) opaque;
addr -= s->vram_base;
if (addr < s->fb_size)
- *(uint32_t *) (s->ds->data + addr) = value;
+ *(uint32_t *) (ds_get_data(s->ds) + addr) = value;
else
*(uint32_t *) (s->vram + addr) = value;
}
diff --git a/kvm-all.c b/kvm-all.c
index e44f296b0..39f826bbe 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -2,9 +2,11 @@
* QEMU KVM support
*
* Copyright IBM, Corp. 2008
+ * Red Hat, Inc. 2008
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
+ * Glauber Costa <gcosta@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
@@ -41,6 +43,8 @@ typedef struct KVMSlot
int flags;
} KVMSlot;
+typedef struct kvm_dirty_log KVMDirtyLog;
+
int kvm_allowed = 0;
struct KVMState
@@ -82,6 +86,20 @@ static KVMSlot *kvm_lookup_slot(KVMState *s, target_phys_addr_t start_addr)
return NULL;
}
+static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
+{
+ struct kvm_userspace_memory_region mem;
+
+ mem.slot = slot->slot;
+ mem.guest_phys_addr = slot->start_addr;
+ mem.memory_size = slot->memory_size;
+ mem.userspace_addr = (unsigned long)phys_ram_base + slot->phys_offset;
+ mem.flags = slot->flags;
+
+ return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
+}
+
+
int kvm_init_vcpu(CPUState *env)
{
KVMState *s = kvm_state;
@@ -119,6 +137,97 @@ err:
return ret;
}
+/*
+ * dirty pages logging control
+ */
+static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr, target_phys_addr_t end_addr,
+ unsigned flags,
+ unsigned mask)
+{
+ KVMState *s = kvm_state;
+ KVMSlot *mem = kvm_lookup_slot(s, phys_addr);
+ if (mem == NULL) {
+ dprintf("invalid parameters %llx-%llx\n", phys_addr, end_addr);
+ return -EINVAL;
+ }
+
+ flags = (mem->flags & ~mask) | flags;
+ /* Nothing changed, no need to issue ioctl */
+ if (flags == mem->flags)
+ return 0;
+
+ mem->flags = flags;
+
+ return kvm_set_user_memory_region(s, mem);
+}
+
+int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t end_addr)
+{
+ return kvm_dirty_pages_log_change(phys_addr, end_addr,
+ KVM_MEM_LOG_DIRTY_PAGES,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t end_addr)
+{
+ return kvm_dirty_pages_log_change(phys_addr, end_addr,
+ 0,
+ KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+/**
+ * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
+ * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty():
+ * every page the kernel reports as dirty is marked dirty in qemu's bitmap as well.
+ *
+ * @start_addr: start of logged region. This is what we use to search the memslot
+ * @end_addr: end of logged region.
+ */
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
+{
+ KVMState *s = kvm_state;
+ KVMDirtyLog d;
+ KVMSlot *mem = kvm_lookup_slot(s, start_addr);
+ unsigned long alloc_size;
+ ram_addr_t addr;
+ target_phys_addr_t phys_addr = start_addr;
+
+ dprintf("sync addr: %llx into %lx\n", start_addr, mem->phys_offset);
+ if (mem == NULL) {
+ fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
+ return;
+ }
+
+ alloc_size = mem->memory_size >> TARGET_PAGE_BITS / sizeof(d.dirty_bitmap);
+ d.dirty_bitmap = qemu_mallocz(alloc_size);
+
+ if (d.dirty_bitmap == NULL) {
+ dprintf("Could not allocate dirty bitmap\n");
+ return;
+ }
+
+ d.slot = mem->slot;
+ dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
+ d.slot, mem->start_addr, mem->phys_offset);
+
+ if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
+ dprintf("ioctl failed %d\n", errno);
+ goto out;
+ }
+
+ phys_addr = start_addr;
+ for (addr = mem->phys_offset; phys_addr < end_addr; phys_addr+= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+ unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
+ unsigned nr = (phys_addr - start_addr) >> TARGET_PAGE_BITS;
+ unsigned word = nr / (sizeof(*bitmap) * 8);
+ unsigned bit = nr % (sizeof(*bitmap) * 8);
+ if ((bitmap[word] >> bit) & 1)
+ cpu_physical_memory_set_dirty(addr);
+ }
+out:
+ qemu_free(d.dirty_bitmap);
+}
+
int kvm_init(int smp_cpus)
{
KVMState *s;
@@ -316,19 +425,6 @@ int kvm_cpu_exec(CPUState *env)
return ret;
}
-static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
-{
- struct kvm_userspace_memory_region mem;
-
- mem.slot = slot->slot;
- mem.guest_phys_addr = slot->start_addr;
- mem.memory_size = slot->memory_size;
- mem.userspace_addr = (unsigned long)phys_ram_base + slot->phys_offset;
- mem.flags = slot->flags;
-
- return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
-}
-
void kvm_set_phys_mem(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset)
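[editor's note] For reference, the per-page bookkeeping in kvm_physical_sync_dirty_bitmap() above treats the KVM_GET_DIRTY_LOG buffer as one bit per guest page. A quick worked example of the indexing, assuming 4 KiB target pages and 64-bit longs:

    unsigned nr   = (phys_addr - start_addr) >> TARGET_PAGE_BITS;  /* 0x13000 >> 12 = 19 */
    unsigned word = nr / (sizeof(unsigned long) * 8);              /* 19 / 64 = 0        */
    unsigned bit  = nr % (sizeof(unsigned long) * 8);              /* 19 % 64 = 19       */
    int dirty     = (bitmap[word] >> bit) & 1;                     /* test bit 19 of word 0 */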
diff --git a/kvm.h b/kvm.h
index 2f05fffd1..465f03fb9 100644
--- a/kvm.h
+++ b/kvm.h
@@ -40,6 +40,10 @@ void kvm_set_phys_mem(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset);
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
+
+int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len);
+int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len);
/* internal API */
struct KVMState;
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 1dd634f79..e0f6aaf73 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -361,10 +361,15 @@ int queue_signal(CPUState *env, int sig, target_siginfo_t *info)
k = &ts->sigtab[sig - 1];
handler = sigact_table[sig - 1]._sa_handler;
if (handler == TARGET_SIG_DFL) {
+ if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
+ kill(getpid(),SIGSTOP);
+ return 0;
+ } else
/* default handler : ignore some signal. The other are fatal */
if (sig != TARGET_SIGCHLD &&
sig != TARGET_SIGURG &&
- sig != TARGET_SIGWINCH) {
+ sig != TARGET_SIGWINCH &&
+ sig != TARGET_SIGCONT) {
force_sig(sig);
} else {
return 0; /* indicate ignored */
@@ -502,7 +507,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
int host_sig;
int ret = 0;
- if (sig < 1 || sig > TARGET_NSIG || sig == SIGKILL || sig == SIGSTOP)
+ if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
return -EINVAL;
k = &sigact_table[sig - 1];
#if defined(DEBUG_SIGNAL)
diff --git a/m68k-dis.c b/m68k-dis.c
index 5a4ece5a3..9978caac5 100644
--- a/m68k-dis.c
+++ b/m68k-dis.c
@@ -7,7 +7,7 @@
#include "dis-asm.h"
-/* **** foatformat.h from sourceware.org CVS 2005-08-14. */
+/* **** floatformat.h from sourceware.org CVS 2005-08-14. */
/* IEEE floating point support declarations, for GDB, the GNU Debugger.
Copyright 1991, 1994, 1995, 1997, 2000, 2003 Free Software Foundation, Inc.
diff --git a/qemu-char.c b/qemu-char.c
index d2335dd60..27cdd325a 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -452,7 +452,6 @@ int send_all(int fd, const void *buf, int len1)
while (len > 0) {
ret = send(fd, buf, len, 0);
if (ret < 0) {
- int errno;
errno = WSAGetLastError();
if (errno != WSAEWOULDBLOCK) {
return -1;
diff --git a/qemu-doc.texi b/qemu-doc.texi
index 6f507014d..d2fabdaf3 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -399,7 +399,7 @@ time). This option is needed to have correct date in MS-DOS or
Windows.
@item -startdate @var{date}
-Set the initial date of the real time clock. Valid format for
+Set the initial date of the real time clock. Valid formats for
@var{date} are: @code{now} or @code{2006-06-17T16:01:21} or
@code{2006-06-17}. The default value is @code{now}.
@@ -424,7 +424,7 @@ This option is useful to load things like EtherBoot.
@item -name @var{name}
Sets the @var{name} of the guest.
-This name will be display in the SDL window caption.
+This name will be displayed in the SDL window caption.
The @var{name} will also be used for the VNC server.
@end table
@@ -624,13 +624,15 @@ Use the user mode network stack which requires no administrator
privilege to run. @option{hostname=name} can be used to specify the client
hostname reported by the builtin DHCP server.
-@item -net tap[,vlan=@var{n}][,fd=@var{h}][,ifname=@var{name}][,script=@var{file}]
-Connect the host TAP network interface @var{name} to VLAN @var{n} and
-use the network script @var{file} to configure it. The default
-network script is @file{/etc/qemu-ifup}. Use @option{script=no} to
-disable script execution. If @var{name} is not
-provided, the OS automatically provides one. @option{fd}=@var{h} can be
-used to specify the handle of an already opened host TAP interface. Example:
+@item -net tap[,vlan=@var{n}][,fd=@var{h}][,ifname=@var{name}][,script=@var{file}][,downscript=@var{dfile}]
+Connect the host TAP network interface @var{name} to VLAN @var{n}, use
+the network script @var{file} to configure it and the network script
+@var{dfile} to deconfigure it. If @var{name} is not provided, the OS
+automatically provides one. @option{fd}=@var{h} can be used to specify
+the handle of an already opened host TAP interface. The default network
+configure script is @file{/etc/qemu-ifup} and the default network
+deconfigure script is @file{/etc/qemu-ifdown}. Use @option{script=no}
+or @option{downscript=no} to disable script execution. Example:
@example
qemu linux.img -net nic -net tap
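[editor's note] With the new option the deconfigure step can likewise be redirected or suppressed, for instance qemu linux.img -net nic -net tap,ifname=tap0,downscript=no (an illustrative invocation following the syntax documented above, not an example taken from the manual).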
diff --git a/qemu-kvm.c b/qemu-kvm.c
index b6c828852..2f50d261f 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -879,13 +879,15 @@ int kvm_update_debugger(CPUState *env)
memset(data.dbg.breakpoints, 0, sizeof(data.dbg.breakpoints));
data.dbg.enabled = 0;
- if (env->breakpoints || env->singlestep_enabled) {
- bp = env->breakpoints;
+ if (!TAILQ_EMPTY(&env->breakpoints) || env->singlestep_enabled) {
+ bp = TAILQ_FIRST(&env->breakpoints);
data.dbg.enabled = 1;
- for (i = 0; i < 4 && bp; ++i) {
- data.dbg.breakpoints[i].enabled = 1;
- data.dbg.breakpoints[i].address = bp->pc;
- bp = bp->next;
+ for (i = 0; i < 4; ++i) {
+ data.dbg.breakpoints[i].enabled = bp != NULL;
+ if (bp) {
+ data.dbg.breakpoints[i].address = bp->pc;
+ bp = TAILQ_NEXT(bp, entry);
+ }
}
data.dbg.singlestep = env->singlestep_enabled;
}
@@ -937,7 +939,8 @@ int kvm_get_dirty_pages_log_range(unsigned long start_addr,
{
unsigned int i, j, n=0;
unsigned char c;
- unsigned page_number, addr, addr1;
+ unsigned long page_number, addr, addr1;
+ ram_addr_t ram_addr;
unsigned int len = ((mem_size/TARGET_PAGE_SIZE) + 7) / 8;
/*
@@ -952,7 +955,8 @@ int kvm_get_dirty_pages_log_range(unsigned long start_addr,
page_number = i * 8 + j;
addr1 = page_number * TARGET_PAGE_SIZE;
addr = offset + addr1;
- cpu_physical_memory_set_dirty(addr);
+ ram_addr = cpu_get_physical_page_desc(addr);
+ cpu_physical_memory_set_dirty(ram_addr);
n++;
}
}
@@ -1064,5 +1068,32 @@ void kvm_ioperm(CPUState *env, void *data)
if (kvm_enabled() && qemu_system_ready)
on_vcpu(env, kvm_arch_do_ioperm, data);
}
+
+#endif
+
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
+{
+ void *buf;
+
+ buf = qemu_malloc((end_addr - start_addr) / 8 + 2);
+ kvm_get_dirty_pages_range(kvm_context, start_addr, end_addr - start_addr,
+ buf, NULL, kvm_get_dirty_bitmap_cb);
+ qemu_free(buf);
+}
+
+int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
+{
+#ifndef TARGET_IA64
+ kvm_qemu_log_memory(phys_addr, len, 1);
+#endif
+ return 0;
+}
+
+int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
+{
+#ifndef TARGET_IA64
+ kvm_qemu_log_memory(phys_addr, len, 0);
#endif
+ return 0;
+}
diff --git a/qemu-kvm.h b/qemu-kvm.h
index d05d969f2..3aef05a2d 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -156,4 +156,9 @@ static inline void kvm_set_phys_mem(target_phys_addr_t start_addr,
}
+void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
+
+int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len);
+int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len);
+
#endif
diff --git a/qemu-tool.c b/qemu-tool.c
index 7211da614..30a6ac739 100644
--- a/qemu-tool.c
+++ b/qemu-tool.c
@@ -77,7 +77,7 @@ int qemu_set_fd_handler2(int fd,
int64_t qemu_get_clock(QEMUClock *clock)
{
- struct timeval tv;
+ qemu_timeval tv;
qemu_gettimeofday(&tv);
return (tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000)) / 1000000;
}
diff --git a/sys-queue.h b/sys-queue.h
index 3d0773eeb..ad5c8fb5f 100644
--- a/sys-queue.h
+++ b/sys-queue.h
@@ -210,6 +210,11 @@ struct { \
(var); \
(var) = ((var)->field.tqe_next))
+#define TAILQ_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->tqh_first); \
+ (var) && ((next_var) = ((var)->field.tqe_next), 1); \
+ (var) = (next_var))
+
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
(var); \
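[editor's note] The new TAILQ_FOREACH_SAFE variant snapshots the next pointer before the loop body runs, so the current element can be unlinked or freed during iteration. A minimal sketch using the CPU breakpoint list converted elsewhere in this merge (illustrative; the real removal helpers live in exec.c):

    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->pc == pc) {
            TAILQ_REMOVE(&env->breakpoints, bp, entry);
            qemu_free(bp);          /* safe: iteration continues via 'next' */
        }
    }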
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index 122e2c29e..65be4fb5e 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -409,7 +409,6 @@ int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc);
int cpu_alpha_mfpr (CPUState *env, int iprn, uint64_t *valp);
int cpu_alpha_mtpr (CPUState *env, int iprn, uint64_t val, uint64_t *oldvalp);
-void cpu_loop_exit (void);
void pal_init (CPUState *env);
#if !defined (CONFIG_USER_ONLY)
void call_pal (CPUState *env);
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 3b90f6292..7a0e54feb 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2363,8 +2363,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
gen_icount_start();
for (ret = 0; ret == 0;) {
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
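[editor's note] Every target's translator now walks env->breakpoints as a TAILQ instead of a hand-rolled singly linked list. A minimal sketch of the declaration and insertion side implied by the iteration above, assuming the 'entry' link name used throughout (the actual list management lives in cpu-defs.h/exec.c and is not shown here):

    /* in the common CPU state (sketch) */
    TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    /* once at init time */
    TAILQ_INIT(&env->breakpoints);

    /* adding a breakpoint (sketch) */
    CPUBreakpoint *bp = qemu_mallocz(sizeof(*bp));
    bp->pc = pc;
    TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);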
diff --git a/target-arm/exec.h b/target-arm/exec.h
index c543cf4a4..88f08862d 100644
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -58,6 +58,4 @@ static inline int cpu_halted(CPUState *env) {
#include "softmmu_exec.h"
#endif
-void cpu_loop_exit(void);
-
void raise_exception(int);
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 54eb0670b..f984de7c5 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -8677,8 +8677,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
}
#endif
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_set_condexec(dc);
gen_set_pc_im(dc->pc);
diff --git a/target-cris/exec.h b/target-cris/exec.h
index 61714738d..ec7346acd 100644
--- a/target-cris/exec.h
+++ b/target-cris/exec.h
@@ -45,8 +45,6 @@ int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
void cpu_cris_flush_flags(CPUCRISState *env, int cc_op);
void helper_movec(CPUCRISState *env, int reg, uint32_t val);
-void cpu_loop_exit(void);
-
static inline int cpu_halted(CPUState *env) {
if (!env->halted)
return 0;
diff --git a/target-cris/translate.c b/target-cris/translate.c
index ac258a9de..242ef9c10 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -3189,8 +3189,8 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
{
CPUBreakpoint *bp;
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == dc->pc) {
cris_evaluate_flags (dc);
tcg_gen_movi_tl(env_pc, dc->pc);
diff --git a/target-i386/exec.h b/target-i386/exec.h
index 72ffdabdb..36631665e 100644
--- a/target-i386/exec.h
+++ b/target-i386/exec.h
@@ -72,10 +72,6 @@ void raise_interrupt(int intno, int is_int, int error_code,
void raise_exception_err(int exception_index, int error_code);
void raise_exception(int exception_index);
void do_smm_enter(void);
-void __hidden cpu_loop_exit(void);
-
-void OPPROTO op_movl_eflags_T0(void);
-void OPPROTO op_movl_T0_eflags(void);
/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 4124b7265..0d3e3cc47 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1371,7 +1371,7 @@ static void breakpoint_handler(CPUState *env)
cpu_resume_from_signal(env, NULL);
}
} else {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next)
+ TAILQ_FOREACH(bp, &env->breakpoints, entry)
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, 1);
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 0de238b57..612811b02 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7606,8 +7606,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
gen_icount_start();
for(;;) {
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == pc_ptr) {
gen_debug(dc, pc_ptr - dc->cs_base);
break;
diff --git a/target-m68k/exec.h b/target-m68k/exec.h
index 745606ec0..f67941d22 100644
--- a/target-m68k/exec.h
+++ b/target-m68k/exec.h
@@ -44,8 +44,6 @@ int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
#include "softmmu_exec.h"
#endif
-void cpu_loop_exit(void);
-
static inline int cpu_halted(CPUState *env) {
if (!env->halted)
return 0;
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index a14f6c560..bc2fe2b61 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -503,7 +503,7 @@ static inline TCGv gen_extend(TCGv val, int opsize, int sign)
}
/* Generate code for an "effective address". Does not adjust the base
- register for autoincrememnt addressing modes. */
+ register for autoincrement addressing modes. */
static TCGv gen_lea(DisasContext *s, uint16_t insn, int opsize)
{
TCGv reg;
@@ -2999,8 +2999,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
diff --git a/target-mips/exec.h b/target-mips/exec.h
index 5d3e35607..8ab64370f 100644
--- a/target-mips/exec.h
+++ b/target-mips/exec.h
@@ -29,7 +29,6 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
void do_interrupt (CPUState *env);
void r4k_invalidate_tlb (CPUState *env, int idx, int use_extra);
-void cpu_loop_exit(void);
void do_raise_exception_err (uint32_t exception, int error_code);
void do_raise_exception (uint32_t exception);
diff --git a/target-mips/translate.c b/target-mips/translate.c
index cc7e71c57..418b9ef5d 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -8286,8 +8286,8 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
#endif
gen_icount_start();
while (ctx.bstate == BS_NONE) {
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index de9e82c61..b39bbad1f 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -537,13 +537,6 @@ struct CPUPPCState {
#if (TARGET_LONG_BITS > HOST_LONG_BITS) || defined(HOST_I386)
target_ulong t2;
#endif
-#if !defined(TARGET_PPC64)
- /* temporary fixed-point registers
- * used to emulate 64 bits registers on 32 bits targets
- */
- uint64_t t0_64, t1_64, t2_64;
-#endif
- ppc_avr_t avr0, avr1, avr2;
/* general purpose registers */
target_ulong gpr[32];
@@ -569,9 +562,6 @@ struct CPUPPCState {
target_ulong tgpr[4]; /* Used to speed-up TLB assist handlers */
/* Floating point execution context */
- /* temporary float registers */
- float64 ft0;
- float64 ft1;
float_status fp_status;
/* floating point registers */
float64 fpr[32];
@@ -700,7 +690,6 @@ int cpu_ppc_signal_handler (int host_signum, void *pinfo,
void do_interrupt (CPUPPCState *env);
void ppc_hw_interrupt (CPUPPCState *env);
-void cpu_loop_exit (void);
void dump_stack (CPUPPCState *env);
diff --git a/target-ppc/exec.h b/target-ppc/exec.h
index 88499b170..67cb8b437 100644
--- a/target-ppc/exec.h
+++ b/target-ppc/exec.h
@@ -44,23 +44,6 @@ register target_ulong T1 asm(AREG2);
register target_ulong T2 asm(AREG3);
#define TDX "%016lx"
#endif
-/* We may, sometime, need 64 bits registers on 32 bits targets */
-#if !defined(TARGET_PPC64)
-#define T0_64 (env->t0_64)
-#define T1_64 (env->t1_64)
-#define T2_64 (env->t2_64)
-#else
-#define T0_64 T0
-#define T1_64 T1
-#define T2_64 T2
-#endif
-/* Provision for Altivec */
-#define AVR0 (env->avr0)
-#define AVR1 (env->avr1)
-#define AVR2 (env->avr2)
-
-#define FT0 (env->ft0)
-#define FT1 (env->ft1)
#if defined (DEBUG_OP)
# define RETURN() __asm__ __volatile__("nop" : : : "memory");
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 3fe568169..f52cfce87 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -2,9 +2,32 @@
DEF_HELPER_2(raise_exception_err, void, i32, i32)
DEF_HELPER_0(raise_debug, void)
+DEF_HELPER_3(tw, void, tl, tl, i32)
+#if defined(TARGET_PPC64)
+DEF_HELPER_3(td, void, tl, tl, i32)
+#endif
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_0(rfi, void)
+DEF_HELPER_0(rfsvc, void)
+DEF_HELPER_0(40x_rfci, void)
+DEF_HELPER_0(rfci, void)
+DEF_HELPER_0(rfdi, void)
+DEF_HELPER_0(rfmci, void)
+#if defined(TARGET_PPC64)
+DEF_HELPER_0(rfid, void)
+DEF_HELPER_0(hrfid, void)
+#endif
+#endif
-DEF_HELPER_2(fcmpo, i32, i64, i64)
-DEF_HELPER_2(fcmpu, i32, i64, i64)
+DEF_HELPER_2(lmw, void, tl, i32)
+DEF_HELPER_2(stmw, void, tl, i32)
+DEF_HELPER_3(lsw, void, tl, i32, i32)
+DEF_HELPER_4(lswx, void, tl, i32, i32, i32)
+DEF_HELPER_3(stsw, void, tl, i32, i32)
+DEF_HELPER_1(dcbz, void, tl)
+DEF_HELPER_1(dcbz_970, void, tl)
+DEF_HELPER_1(icbi, void, tl)
+DEF_HELPER_4(lscbx, tl, tl, i32, i32, i32)
DEF_HELPER_0(load_cr, tl)
DEF_HELPER_2(store_cr, void, tl, i32)
@@ -35,6 +58,11 @@ DEF_HELPER_0(reset_fpstatus, void)
DEF_HELPER_2(compute_fprf, i32, i64, i32)
DEF_HELPER_2(store_fpscr, void, i64, i32)
DEF_HELPER_1(fpscr_setbit, void, i32)
+DEF_HELPER_1(float64_to_float32, i32, i64)
+DEF_HELPER_1(float32_to_float64, i64, i32)
+
+DEF_HELPER_2(fcmpo, i32, i64, i64)
+DEF_HELPER_2(fcmpu, i32, i64, i64)
DEF_HELPER_1(fctiw, i64, i64)
DEF_HELPER_1(fctiwz, i64, i64)
@@ -133,4 +161,13 @@ DEF_HELPER_2(efdcmplt, i32, i64, i64)
DEF_HELPER_2(efdcmpgt, i32, i64, i64)
DEF_HELPER_2(efdcmpeq, i32, i64, i64)
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_1(load_6xx_tlbd, void, tl)
+DEF_HELPER_1(load_6xx_tlbi, void, tl)
+DEF_HELPER_1(load_74xx_tlbd, void, tl)
+DEF_HELPER_1(load_74xx_tlbi, void, tl)
+
+DEF_HELPER_1(602_mfrom, tl, tl)
+#endif
+
#include "def-helper.h"
diff --git a/target-ppc/op.c b/target-ppc/op.c
index ee2f36ece..6be877776 100644
--- a/target-ppc/op.c
+++ b/target-ppc/op.c
@@ -257,67 +257,8 @@ void OPPROTO op_srli_T1 (void)
RETURN();
}
-/* Load and store */
-#define MEMSUFFIX _raw
-#include "op_helper.h"
-#include "op_mem.h"
-#if !defined(CONFIG_USER_ONLY)
-#define MEMSUFFIX _user
-#include "op_helper.h"
-#include "op_mem.h"
-#define MEMSUFFIX _kernel
-#include "op_helper.h"
-#include "op_mem.h"
-#define MEMSUFFIX _hypv
-#include "op_helper.h"
-#include "op_mem.h"
-#endif
-
-/* Special op to check and maybe clear reservation */
-void OPPROTO op_check_reservation (void)
-{
- if ((uint32_t)env->reserve == (uint32_t)(T0 & ~0x00000003))
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO op_check_reservation_64 (void)
-{
- if ((uint64_t)env->reserve == (uint64_t)(T0 & ~0x00000003))
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-#endif
-
-void OPPROTO op_wait (void)
-{
- env->halted = 1;
- RETURN();
-}
-
/* Return from interrupt */
#if !defined(CONFIG_USER_ONLY)
-void OPPROTO op_rfi (void)
-{
- do_rfi();
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO op_rfid (void)
-{
- do_rfid();
- RETURN();
-}
-
-void OPPROTO op_hrfid (void)
-{
- do_hrfid();
- RETURN();
-}
-#endif
-
/* Exception vectors */
void OPPROTO op_store_excp_prefix (void)
{
@@ -334,21 +275,6 @@ void OPPROTO op_store_excp_vector (void)
}
#endif
-/* Trap word */
-void OPPROTO op_tw (void)
-{
- do_tw(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO op_td (void)
-{
- do_td(PARAM1);
- RETURN();
-}
-#endif
-
#if !defined(CONFIG_USER_ONLY)
/* tlbia */
void OPPROTO op_tlbia (void)
@@ -393,34 +319,6 @@ void OPPROTO op_slbie_64 (void)
#endif
#endif
-#if !defined(CONFIG_USER_ONLY)
-/* PowerPC 602/603/755 software TLB load instructions */
-void OPPROTO op_6xx_tlbld (void)
-{
- do_load_6xx_tlb(0);
- RETURN();
-}
-
-void OPPROTO op_6xx_tlbli (void)
-{
- do_load_6xx_tlb(1);
- RETURN();
-}
-
-/* PowerPC 74xx software TLB load instructions */
-void OPPROTO op_74xx_tlbld (void)
-{
- do_load_74xx_tlb(0);
- RETURN();
-}
-
-void OPPROTO op_74xx_tlbli (void)
-{
- do_load_74xx_tlb(1);
- RETURN();
-}
-#endif
-
/* 601 specific */
void OPPROTO op_load_601_rtcl (void)
{
@@ -534,12 +432,6 @@ void OPPROTO op_POWER_dozo (void)
RETURN();
}
-void OPPROTO op_load_xer_cmp (void)
-{
- T2 = xer_cmp;
- RETURN();
-}
-
void OPPROTO op_POWER_maskg (void)
{
do_POWER_maskg();
@@ -725,21 +617,6 @@ void OPPROTO op_POWER_rac (void)
do_POWER_rac();
RETURN();
}
-
-void OPPROTO op_POWER_rfsvc (void)
-{
- do_POWER_rfsvc();
- RETURN();
-}
-#endif
-
-/* PowerPC 602 specific instruction */
-#if !defined(CONFIG_USER_ONLY)
-void OPPROTO op_602_mfrom (void)
-{
- do_op_602_mfrom();
- RETURN();
-}
#endif
/* PowerPC 4xx specific micro-ops */
@@ -756,33 +633,6 @@ void OPPROTO op_store_dcr (void)
}
#if !defined(CONFIG_USER_ONLY)
-/* Return from critical interrupt :
- * same as rfi, except nip & MSR are loaded from SRR2/3 instead of SRR0/1
- */
-void OPPROTO op_40x_rfci (void)
-{
- do_40x_rfci();
- RETURN();
-}
-
-void OPPROTO op_rfci (void)
-{
- do_rfci();
- RETURN();
-}
-
-void OPPROTO op_rfdi (void)
-{
- do_rfdi();
- RETURN();
-}
-
-void OPPROTO op_rfmci (void)
-{
- do_rfmci();
- RETURN();
-}
-
void OPPROTO op_wrte (void)
{
/* We don't call do_store_msr here as we won't trigger
@@ -929,35 +779,3 @@ void OPPROTO op_store_booke_tsr (void)
}
#endif /* !defined(CONFIG_USER_ONLY) */
-/* SPE extension */
-void OPPROTO op_splatw_T1_64 (void)
-{
- T1_64 = (T1_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL);
- RETURN();
-}
-
-void OPPROTO op_extsh_T1_64 (void)
-{
- T1_64 = (int32_t)((int16_t)T1_64);
- RETURN();
-}
-
-void OPPROTO op_sli16_T1_64 (void)
-{
- T1_64 = T1_64 << 16;
- RETURN();
-}
-
-void OPPROTO op_sli32_T1_64 (void)
-{
- T1_64 = T1_64 << 32;
- RETURN();
-}
-
-void OPPROTO op_srli32_T1_64 (void)
-{
- T1_64 = T1_64 >> 32;
- RETURN();
-}
-
-
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
index 6ea3ba3da..a5bd9737f 100644
--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -24,21 +24,6 @@
#include "helper_regs.h"
#include "op_helper.h"
-#define MEMSUFFIX _raw
-#include "op_helper.h"
-#include "op_helper_mem.h"
-#if !defined(CONFIG_USER_ONLY)
-#define MEMSUFFIX _user
-#include "op_helper.h"
-#include "op_helper_mem.h"
-#define MEMSUFFIX _kernel
-#include "op_helper.h"
-#include "op_helper_mem.h"
-#define MEMSUFFIX _hypv
-#include "op_helper.h"
-#include "op_helper_mem.h"
-#endif
-
//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB
@@ -56,7 +41,6 @@ void helper_raise_debug (void)
raise_exception(env, EXCP_DEBUG);
}
-
/*****************************************************************************/
/* Registers load and stores */
target_ulong helper_load_cr (void)
@@ -109,6 +93,267 @@ void ppc_store_dump_spr (int sprn, target_ulong val)
}
/*****************************************************************************/
+/* Memory load and stores */
+
+static always_inline target_ulong get_addr(target_ulong addr)
+{
+#if defined(TARGET_PPC64)
+ if (msr_sf)
+ return addr;
+ else
+#endif
+ return (uint32_t)addr;
+}
+
+void helper_lmw (target_ulong addr, uint32_t reg)
+{
+#ifdef CONFIG_USER_ONLY
+#define ldfun ldl_raw
+#else
+ int (*ldfun)(target_ulong);
+
+ switch (env->mmu_idx) {
+ default:
+ case 0: ldfun = ldl_user;
+ break;
+ case 1: ldfun = ldl_kernel;
+ break;
+ case 2: ldfun = ldl_hypv;
+ break;
+ }
+#endif
+ for (; reg < 32; reg++, addr += 4) {
+ if (msr_le)
+ env->gpr[reg] = bswap32(ldfun(get_addr(addr)));
+ else
+ env->gpr[reg] = ldfun(get_addr(addr));
+ }
+}
+
+void helper_stmw (target_ulong addr, uint32_t reg)
+{
+#ifdef CONFIG_USER_ONLY
+#define stfun stl_raw
+#else
+ void (*stfun)(target_ulong, int);
+
+ switch (env->mmu_idx) {
+ default:
+ case 0: stfun = stl_user;
+ break;
+ case 1: stfun = stl_kernel;
+ break;
+ case 2: stfun = stl_hypv;
+ break;
+ }
+#endif
+ for (; reg < 32; reg++, addr += 4) {
+ if (msr_le)
+ stfun(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
+ else
+ stfun(get_addr(addr), (uint32_t)env->gpr[reg]);
+ }
+}
+
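[editor's note] helper_lmw/helper_stmw implement the PowerPC load/store-multiple semantics: starting at GPR 'reg', successive 32-bit words are transferred up to r31, byte-swapped when MSR[LE] is set. A quick worked example (illustrative numbers only):

    /* lmw r29, 0(r1)  ->  conceptually helper_lmw(gpr[1] + 0, 29):
     *   r29 <- MEM[addr], r30 <- MEM[addr+4], r31 <- MEM[addr+8]
     * stmw r29, 0(r1) is the mirror image, storing the same three GPRs. */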
+void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
+{
+ int sh;
+#ifdef CONFIG_USER_ONLY
+#define ldfunl ldl_raw
+#define ldfunb ldub_raw
+#else
+ int (*ldfunl)(target_ulong);
+ int (*ldfunb)(target_ulong);
+
+ switch (env->mmu_idx) {
+ default:
+ case 0:
+ ldfunl = ldl_user;
+ ldfunb = ldub_user;
+ break;
+ case 1:
+ ldfunl = ldl_kernel;
+ ldfunb = ldub_kernel;
+ break;
+ case 2:
+ ldfunl = ldl_hypv;
+ ldfunb = ldub_hypv;
+ break;
+ }
+#endif
+ for (; nb > 3; nb -= 4, addr += 4) {
+ env->gpr[reg] = ldfunl(get_addr(addr));
+ reg = (reg + 1) % 32;
+ }
+ if (unlikely(nb > 0)) {
+ env->gpr[reg] = 0;
+ for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
+ env->gpr[reg] |= ldfunb(get_addr(addr)) << sh;
+ }
+ }
+}
+/* PPC32 specification says we must generate an exception if
+ * rA is in the range of registers to be loaded.
+ * On the other hand, IBM says this is valid, but rA won't be loaded.
+ * For now, I'll follow the spec...
+ */
+void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
+{
+ if (likely(xer_bc != 0)) {
+ if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
+ (reg < rb && (reg + xer_bc) > rb))) {
+ raise_exception_err(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_LSWX);
+ } else {
+ helper_lsw(addr, xer_bc, reg);
+ }
+ }
+}
+
+void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
+{
+ int sh;
+#ifdef CONFIG_USER_ONLY
+#define stfunl stl_raw
+#define stfunb stb_raw
+#else
+ void (*stfunl)(target_ulong, int);
+ void (*stfunb)(target_ulong, int);
+
+ switch (env->mmu_idx) {
+ default:
+ case 0:
+ stfunl = stl_user;
+ stfunb = stb_user;
+ break;
+ case 1:
+ stfunl = stl_kernel;
+ stfunb = stb_kernel;
+ break;
+ case 2:
+ stfunl = stl_hypv;
+ stfunb = stb_hypv;
+ break;
+ }
+#endif
+
+ for (; nb > 3; nb -= 4, addr += 4) {
+ stfunl(get_addr(addr), env->gpr[reg]);
+ reg = (reg + 1) % 32;
+ }
+ if (unlikely(nb > 0)) {
+ for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
+ stfunb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
+ }
+}
+
+static void do_dcbz(target_ulong addr, int dcache_line_size)
+{
+ target_long mask = get_addr(~(dcache_line_size - 1));
+ int i;
+#ifdef CONFIG_USER_ONLY
+#define stfun stl_raw
+#else
+ void (*stfun)(target_ulong, int);
+
+ switch (env->mmu_idx) {
+ default:
+ case 0: stfun = stl_user;
+ break;
+ case 1: stfun = stl_kernel;
+ break;
+ case 2: stfun = stl_hypv;
+ break;
+ }
+#endif
+ addr &= mask;
+ for (i = 0 ; i < dcache_line_size ; i += 4) {
+ stfun(addr + i , 0);
+ }
+ if ((env->reserve & mask) == addr)
+ env->reserve = (target_ulong)-1ULL;
+}
+
+void helper_dcbz(target_ulong addr)
+{
+ do_dcbz(addr, env->dcache_line_size);
+}
+
+void helper_dcbz_970(target_ulong addr)
+{
+ if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
+ do_dcbz(addr, 32);
+ else
+ do_dcbz(addr, env->dcache_line_size);
+}
+
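[editor's note] do_dcbz zeroes one data-cache line: the effective address is rounded down to the line boundary, the whole line is written as 32-bit zeros, and any reservation covering that line is cancelled. A worked example of the masking, assuming a 32-byte line:

    /* dcache_line_size = 32:
     *   mask = ~(32 - 1)      = 0xffffffe0
     *   addr = 0x1234 & mask  = 0x1220
     *   eight 4-byte stores at addr+0 .. addr+28 clear the line */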
+void helper_icbi(target_ulong addr)
+{
+ uint32_t tmp;
+
+ addr = get_addr(addr & ~(env->dcache_line_size - 1));
+ /* Invalidate one cache line :
+ * PowerPC specification says this is to be treated like a load
+ * (not a fetch) by the MMU. To be sure it will be so,
+ * do the load "by hand".
+ */
+#ifdef CONFIG_USER_ONLY
+ tmp = ldl_raw(addr);
+#else
+ switch (env->mmu_idx) {
+ default:
+ case 0: tmp = ldl_user(addr);
+ break;
+ case 1: tmp = ldl_kernel(addr);
+ break;
+ case 2: tmp = ldl_hypv(addr);
+ break;
+ }
+#endif
+ tb_invalidate_page_range(addr, addr + env->icache_line_size);
+}
+
+// XXX: to be tested
+target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
+{
+ int i, c, d;
+#ifdef CONFIG_USER_ONLY
+#define ldfun ldub_raw
+#else
+ int (*ldfun)(target_ulong);
+
+ switch (env->mmu_idx) {
+ default:
+ case 0: ldfun = ldub_user;
+ break;
+ case 1: ldfun = ldub_kernel;
+ break;
+ case 2: ldfun = ldub_hypv;
+ break;
+ }
+#endif
+ d = 24;
+ for (i = 0; i < xer_bc; i++) {
+ c = ldfun((uint32_t)addr++);
+ /* ra (if not 0) and rb are never modified */
+ if (likely(reg != rb && (ra == 0 || reg != ra))) {
+ env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
+ }
+ if (unlikely(c == xer_cmp))
+ break;
+ if (likely(d != 0)) {
+ d -= 8;
+ } else {
+ d = 24;
+ reg++;
+ reg = reg & 0x1F;
+ }
+ }
+ return i;
+}
+
+/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)
@@ -237,6 +482,24 @@ target_ulong helper_popcntb_64 (target_ulong val)
/*****************************************************************************/
/* Floating point operations helpers */
+uint64_t helper_float32_to_float64(uint32_t arg)
+{
+ CPU_FloatU f;
+ CPU_DoubleU d;
+ f.l = arg;
+ d.d = float32_to_float64(f.f, &env->fp_status);
+ return d.ll;
+}
+
+uint32_t helper_float64_to_float32(uint64_t arg)
+{
+ CPU_FloatU f;
+ CPU_DoubleU d;
+ d.ll = arg;
+ f.f = float64_to_float32(d.d, &env->fp_status);
+ return f.l;
+}
+
static always_inline int fpisneg (float64 d)
{
CPU_DoubleU u;
@@ -1138,7 +1401,6 @@ uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
return farg1.ll;
}
-
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
@@ -1356,7 +1618,7 @@ void do_store_msr (void)
}
}
-static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
+static always_inline void do_rfi (target_ulong nip, target_ulong msr,
target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
@@ -1385,46 +1647,46 @@ static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
-void do_rfi (void)
+void helper_rfi (void)
{
- __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
- ~((target_ulong)0xFFFF0000), 1);
+ do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ ~((target_ulong)0xFFFF0000), 1);
}
#if defined(TARGET_PPC64)
-void do_rfid (void)
+void helper_rfid (void)
{
- __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
- ~((target_ulong)0xFFFF0000), 0);
+ do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ ~((target_ulong)0xFFFF0000), 0);
}
-void do_hrfid (void)
+void helper_hrfid (void)
{
- __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
- ~((target_ulong)0xFFFF0000), 0);
+ do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
+ ~((target_ulong)0xFFFF0000), 0);
}
#endif
#endif
-void do_tw (int flags)
+void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
- if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
- ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
- ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
- ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
- ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
+ if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+ ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+ ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+ ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+ ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
}
#if defined(TARGET_PPC64)
-void do_td (int flags)
+void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
- if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
- ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
- ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
- ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
- ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
+ if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+ ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+ ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+ ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+ ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
@@ -1597,9 +1859,9 @@ void do_POWER_rac (void)
env->nb_BATs = nb_BATs;
}
-void do_POWER_rfsvc (void)
+void helper_rfsvc (void)
{
- __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
+ do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
void do_store_hid0_601 (void)
@@ -1627,19 +1889,19 @@ void do_store_hid0_601 (void)
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
#define USE_MFROM_ROM_TABLE
-void do_op_602_mfrom (void)
+target_ulong helper_602_mfrom (target_ulong arg)
{
- if (likely(T0 < 602)) {
+ if (likely(arg < 602)) {
#if defined(USE_MFROM_ROM_TABLE)
#include "mfrom_table.c"
- T0 = mfrom_ROM_table[T0];
+        return mfrom_ROM_table[arg];
#else
double d;
         /* Extremely decomposed:
- * -T0 / 256
- * T0 = 256 * log10(10 + 1.0) + 0.5
+         *   exponent = -arg / 256
+         *   return 256 * log10(10 ^ exponent + 1.0) + 0.5
*/
- d = T0;
+ d = arg;
d = float64_div(d, 256, &env->fp_status);
d = float64_chs(d);
d = exp10(d); // XXX: use float emulation function
@@ -1647,10 +1909,10 @@ void do_op_602_mfrom (void)
d = log10(d); // XXX: use float emulation function
d = float64_mul(d, 256, &env->fp_status);
d = float64_add(d, 0.5, &env->fp_status);
- T0 = float64_round_to_int(d, &env->fp_status);
+ return float64_round_to_int(d, &env->fp_status);
#endif
} else {
- T0 = 0;
+ return 0;
}
}
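Putting the comment and the float64 calls together, the non-table path of helper_602_mfrom evaluates roughly 256 * log10(10^(-arg/256) + 1.0) + 0.5 and rounds the result to an integer (returning 0 once arg reaches 602). As a worked example under the default round-to-nearest mode: arg = 256 gives 10^(-1) = 0.1, log10(1.1) is about 0.0414, so 256 * 0.0414 + 0.5 is about 11.1, and the helper would return 11.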
@@ -1697,28 +1959,28 @@ void do_store_dcr (void)
}
#if !defined(CONFIG_USER_ONLY)
-void do_40x_rfci (void)
+void helper_40x_rfci (void)
{
- __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
- ~((target_ulong)0xFFFF0000), 0);
+ do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
+ ~((target_ulong)0xFFFF0000), 0);
}
-void do_rfci (void)
+void helper_rfci (void)
{
- __do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
- ~((target_ulong)0x3FFF0000), 0);
+    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
+ ~((target_ulong)0x3FFF0000), 0);
}
-void do_rfdi (void)
+void helper_rfdi (void)
{
- __do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
- ~((target_ulong)0x3FFF0000), 0);
+    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
+ ~((target_ulong)0x3FFF0000), 0);
}
-void do_rfmci (void)
+void helper_rfmci (void)
{
- __do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
- ~((target_ulong)0x3FFF0000), 0);
+    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
+ ~((target_ulong)0x3FFF0000), 0);
}
void do_load_403_pb (int num)
@@ -2442,7 +2704,7 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
-void do_load_6xx_tlb (int is_code)
+static void helper_load_6xx_tlb (target_ulong new_EPN, int is_code)
{
target_ulong RPN, CMP, EPN;
int way;
@@ -2464,11 +2726,22 @@ void do_load_6xx_tlb (int is_code)
}
#endif
/* Store this TLB */
- ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
way, is_code, CMP, RPN);
}
-void do_load_74xx_tlb (int is_code)
+void helper_load_6xx_tlbd (target_ulong EPN)
+{
+ helper_load_6xx_tlb(EPN, 0);
+}
+
+void helper_load_6xx_tlbi (target_ulong EPN)
+{
+ helper_load_6xx_tlb(EPN, 1);
+}
+
+/* PowerPC 74xx software TLB load instructions helpers */
+static void helper_load_74xx_tlb (target_ulong new_EPN, int is_code)
{
target_ulong RPN, CMP, EPN;
int way;
@@ -2485,10 +2758,20 @@ void do_load_74xx_tlb (int is_code)
}
#endif
/* Store this TLB */
- ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
way, is_code, CMP, RPN);
}
+void helper_load_74xx_tlbd (target_ulong EPN)
+{
+ helper_load_74xx_tlb(EPN, 0);
+}
+
+void helper_load_74xx_tlbi (target_ulong EPN)
+{
+ helper_load_74xx_tlb(EPN, 1);
+}
+
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
return 1024 << (2 * size);
diff --git a/target-ppc/op_helper.h b/target-ppc/op_helper.h
index 30f9ca1eb..4318f9afa 100644
--- a/target-ppc/op_helper.h
+++ b/target-ppc/op_helper.h
@@ -18,41 +18,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#if defined(MEMSUFFIX)
-
-/* Memory load/store helpers */
-void glue(do_lsw, MEMSUFFIX) (int dst);
-void glue(do_stsw, MEMSUFFIX) (int src);
-void glue(do_lmw, MEMSUFFIX) (int dst);
-void glue(do_lmw_le, MEMSUFFIX) (int dst);
-void glue(do_stmw, MEMSUFFIX) (int src);
-void glue(do_stmw_le, MEMSUFFIX) (int src);
-void glue(do_icbi, MEMSUFFIX) (void);
-void glue(do_dcbz, MEMSUFFIX) (void);
-void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb);
-void glue(do_POWER2_lfq, MEMSUFFIX) (void);
-void glue(do_POWER2_lfq_le, MEMSUFFIX) (void);
-void glue(do_POWER2_stfq, MEMSUFFIX) (void);
-void glue(do_POWER2_stfq_le, MEMSUFFIX) (void);
-
-#if defined(TARGET_PPC64)
-void glue(do_lsw_64, MEMSUFFIX) (int dst);
-void glue(do_stsw_64, MEMSUFFIX) (int src);
-void glue(do_lmw_64, MEMSUFFIX) (int dst);
-void glue(do_lmw_le_64, MEMSUFFIX) (int dst);
-void glue(do_stmw_64, MEMSUFFIX) (int src);
-void glue(do_stmw_le_64, MEMSUFFIX) (int src);
-void glue(do_icbi_64, MEMSUFFIX) (void);
-void glue(do_dcbz_64, MEMSUFFIX) (void);
-#endif
-
-#else
-
void do_print_mem_EA (target_ulong EA);
/* Registers load and stores */
-void do_load_cr (void);
-void do_store_cr (uint32_t mask);
#if defined(TARGET_PPC64)
void do_store_pri (int prio);
#endif
@@ -60,19 +28,8 @@ target_ulong ppc_load_dump_spr (int sprn);
void ppc_store_dump_spr (int sprn, target_ulong val);
/* Misc */
-void do_tw (int flags);
-#if defined(TARGET_PPC64)
-void do_td (int flags);
-#endif
#if !defined(CONFIG_USER_ONLY)
void do_store_msr (void);
-void do_rfi (void);
-#if defined(TARGET_PPC64)
-void do_rfid (void);
-void do_hrfid (void);
-#endif
-void do_load_6xx_tlb (int is_code);
-void do_load_74xx_tlb (int is_code);
#endif
/* POWER / PowerPC 601 specific helpers */
@@ -87,15 +44,9 @@ void do_POWER_maskg (void);
void do_POWER_mulo (void);
#if !defined(CONFIG_USER_ONLY)
void do_POWER_rac (void);
-void do_POWER_rfsvc (void);
void do_store_hid0_601 (void);
#endif
-/* PowerPC 602 specific helper */
-#if !defined(CONFIG_USER_ONLY)
-void do_op_602_mfrom (void);
-#endif
-
/* PowerPC 440 specific helpers */
#if !defined(CONFIG_USER_ONLY)
void do_440_tlbre (int word);
@@ -106,10 +57,6 @@ void do_440_tlbwe (int word);
void do_load_dcr (void);
void do_store_dcr (void);
#if !defined(CONFIG_USER_ONLY)
-void do_40x_rfci (void);
-void do_rfci (void);
-void do_rfdi (void);
-void do_rfmci (void);
void do_4xx_tlbre_lo (void);
void do_4xx_tlbre_hi (void);
void do_4xx_tlbwe_lo (void);
@@ -124,5 +71,3 @@ void do_440_dlmzb (void);
void do_load_403_pb (int num);
void do_store_403_pb (int num);
#endif
-
-#endif
diff --git a/target-ppc/op_helper_mem.h b/target-ppc/op_helper_mem.h
deleted file mode 100644
index c8ef7ce24..000000000
--- a/target-ppc/op_helper_mem.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * PowerPC emulation micro-operations helpers for qemu.
- *
- * Copyright (c) 2003-2007 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "op_mem_access.h"
-
-/* Multiple word / string load and store */
-void glue(do_lmw, MEMSUFFIX) (int dst)
-{
- for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lmw_64, MEMSUFFIX) (int dst)
-{
- for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
- }
-}
-#endif
-
-void glue(do_stmw, MEMSUFFIX) (int src)
-{
- for (; src < 32; src++, T0 += 4) {
- glue(st32, MEMSUFFIX)((uint32_t)T0, env->gpr[src]);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stmw_64, MEMSUFFIX) (int src)
-{
- for (; src < 32; src++, T0 += 4) {
- glue(st32, MEMSUFFIX)((uint64_t)T0, env->gpr[src]);
- }
-}
-#endif
-
-void glue(do_lmw_le, MEMSUFFIX) (int dst)
-{
- for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ldu32r, MEMSUFFIX)((uint32_t)T0);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lmw_le_64, MEMSUFFIX) (int dst)
-{
- for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ldu32r, MEMSUFFIX)((uint64_t)T0);
- }
-}
-#endif
-
-void glue(do_stmw_le, MEMSUFFIX) (int src)
-{
- for (; src < 32; src++, T0 += 4) {
- glue(st32r, MEMSUFFIX)((uint32_t)T0, env->gpr[src]);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stmw_le_64, MEMSUFFIX) (int src)
-{
- for (; src < 32; src++, T0 += 4) {
- glue(st32r, MEMSUFFIX)((uint64_t)T0, env->gpr[src]);
- }
-}
-#endif
-
-void glue(do_lsw, MEMSUFFIX) (int dst)
-{
- uint32_t tmp;
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
- if (unlikely(dst == 32))
- dst = 0;
- }
- if (unlikely(T1 != 0)) {
- tmp = 0;
- for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
- tmp |= glue(ldu8, MEMSUFFIX)((uint32_t)T0) << sh;
- }
- env->gpr[dst] = tmp;
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lsw_64, MEMSUFFIX) (int dst)
-{
- uint32_t tmp;
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
- if (unlikely(dst == 32))
- dst = 0;
- }
- if (unlikely(T1 != 0)) {
- tmp = 0;
- for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
- tmp |= glue(ldu8, MEMSUFFIX)((uint64_t)T0) << sh;
- }
- env->gpr[dst] = tmp;
- }
-}
-#endif
-
-void glue(do_stsw, MEMSUFFIX) (int src)
-{
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(st32, MEMSUFFIX)((uint32_t)T0, env->gpr[src++]);
- if (unlikely(src == 32))
- src = 0;
- }
- if (unlikely(T1 != 0)) {
- for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
- glue(st8, MEMSUFFIX)((uint32_t)T0, (env->gpr[src] >> sh) & 0xFF);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stsw_64, MEMSUFFIX) (int src)
-{
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(st32, MEMSUFFIX)((uint64_t)T0, env->gpr[src++]);
- if (unlikely(src == 32))
- src = 0;
- }
- if (unlikely(T1 != 0)) {
- for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
- glue(st8, MEMSUFFIX)((uint64_t)T0, (env->gpr[src] >> sh) & 0xFF);
- }
-}
-#endif
-
-/* Instruction cache invalidation helper */
-void glue(do_icbi, MEMSUFFIX) (void)
-{
- uint32_t tmp;
- /* Invalidate one cache line :
- * PowerPC specification says this is to be treated like a load
- * (not a fetch) by the MMU. To be sure it will be so,
- * do the load "by hand".
- */
- T0 &= ~(env->icache_line_size - 1);
- tmp = glue(ldl, MEMSUFFIX)((uint32_t)T0);
- tb_invalidate_page_range((uint32_t)T0,
- (uint32_t)(T0 + env->icache_line_size));
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_icbi_64, MEMSUFFIX) (void)
-{
- uint64_t tmp;
- /* Invalidate one cache line :
- * PowerPC specification says this is to be treated like a load
- * (not a fetch) by the MMU. To be sure it will be so,
- * do the load "by hand".
- */
- T0 &= ~(env->icache_line_size - 1);
- tmp = glue(ldq, MEMSUFFIX)((uint64_t)T0);
- tb_invalidate_page_range((uint64_t)T0,
- (uint64_t)(T0 + env->icache_line_size));
-}
-#endif
-
-void glue(do_dcbz, MEMSUFFIX) (void)
-{
- int dcache_line_size = env->dcache_line_size;
-
- /* XXX: should be 970 specific (?) */
- if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
- dcache_line_size = 32;
- T0 &= ~(uint32_t)(dcache_line_size - 1);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
- if (dcache_line_size >= 64) {
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
- if (dcache_line_size >= 128) {
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x40UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x44UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x48UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x4CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x50UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x54UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x58UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x5CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x60UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x64UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x68UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x6CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x70UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x74UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x78UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x7CUL), 0);
- }
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_dcbz_64, MEMSUFFIX) (void)
-{
- int dcache_line_size = env->dcache_line_size;
-
- /* XXX: should be 970 specific (?) */
- if (((env->spr[SPR_970_HID5] >> 6) & 0x3) == 0x2)
- dcache_line_size = 32;
- T0 &= ~(uint64_t)(dcache_line_size - 1);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
- if (dcache_line_size >= 64) {
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
- if (dcache_line_size >= 128) {
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x40UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x44UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x48UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x4CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x50UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x54UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x58UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x5CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x60UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x64UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x68UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x6CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x70UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x74UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x78UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x7CUL), 0);
- }
- }
-}
-#endif
-
-/* PowerPC 601 specific instructions (POWER bridge) */
-// XXX: to be tested
-void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
-{
- int i, c, d, reg;
-
- d = 24;
- reg = dest;
- for (i = 0; i < T1; i++) {
- c = glue(ldu8, MEMSUFFIX)((uint32_t)T0++);
- /* ra (if not 0) and rb are never modified */
- if (likely(reg != rb && (ra == 0 || reg != ra))) {
- env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
- }
- if (unlikely(c == T2))
- break;
- if (likely(d != 0)) {
- d -= 8;
- } else {
- d = 24;
- reg++;
- reg = reg & 0x1F;
- }
- }
- T0 = i;
-}
-
-/* XXX: TAGs are not managed */
-void glue(do_POWER2_lfq, MEMSUFFIX) (void)
-{
- FT0 = glue(ldfq, MEMSUFFIX)((uint32_t)T0);
- FT1 = glue(ldfq, MEMSUFFIX)((uint32_t)(T0 + 4));
-}
-
-static always_inline float64 glue(ldfqr, MEMSUFFIX) (target_ulong EA)
-{
- CPU_DoubleU u;
-
- u.d = glue(ldfq, MEMSUFFIX)(EA);
- u.ll = bswap64(u.ll);
-
- return u.d;
-}
-
-void glue(do_POWER2_lfq_le, MEMSUFFIX) (void)
-{
- FT0 = glue(ldfqr, MEMSUFFIX)((uint32_t)(T0 + 4));
- FT1 = glue(ldfqr, MEMSUFFIX)((uint32_t)T0);
-}
-
-void glue(do_POWER2_stfq, MEMSUFFIX) (void)
-{
- glue(stfq, MEMSUFFIX)((uint32_t)T0, FT0);
- glue(stfq, MEMSUFFIX)((uint32_t)(T0 + 4), FT1);
-}
-
-static always_inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, float64 d)
-{
- CPU_DoubleU u;
-
- u.d = d;
- u.ll = bswap64(u.ll);
- glue(stfq, MEMSUFFIX)(EA, u.d);
-}
-
-void glue(do_POWER2_stfq_le, MEMSUFFIX) (void)
-{
- glue(stfqr, MEMSUFFIX)((uint32_t)(T0 + 4), FT0);
- glue(stfqr, MEMSUFFIX)((uint32_t)T0, FT1);
-}
-
-#undef MEMSUFFIX
diff --git a/target-ppc/op_mem.h b/target-ppc/op_mem.h
deleted file mode 100644
index 912ff6532..000000000
--- a/target-ppc/op_mem.h
+++ /dev/null
@@ -1,1096 +0,0 @@
-/*
- * PowerPC emulation micro-operations for qemu.
- *
- * Copyright (c) 2003-2007 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "op_mem_access.h"
-
-/*** Integer load and store multiple ***/
-void OPPROTO glue(op_lmw, MEMSUFFIX) (void)
-{
- glue(do_lmw, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lmw_64, MEMSUFFIX) (void)
-{
- glue(do_lmw_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_lmw_le, MEMSUFFIX) (void)
-{
- glue(do_lmw_le, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lmw_le_64, MEMSUFFIX) (void)
-{
- glue(do_lmw_le_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_stmw, MEMSUFFIX) (void)
-{
- glue(do_stmw, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stmw_64, MEMSUFFIX) (void)
-{
- glue(do_stmw_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_stmw_le, MEMSUFFIX) (void)
-{
- glue(do_stmw_le, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stmw_le_64, MEMSUFFIX) (void)
-{
- glue(do_stmw_le_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-/*** Integer load and store strings ***/
-void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
-{
- glue(do_lsw, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
-{
- glue(do_lsw_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-/* PPC32 specification says we must generate an exception if
- * rA is in the range of registers to be loaded.
- * In an other hand, IBM says this is valid, but rA won't be loaded.
- * For now, I'll follow the spec...
- */
-void OPPROTO glue(op_lswx, MEMSUFFIX) (void)
-{
- /* Note: T1 comes from xer_bc then no cast is needed */
- if (likely(T1 != 0)) {
- if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
- (PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
- raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_LSWX);
- } else {
- glue(do_lsw, MEMSUFFIX)(PARAM1);
- }
- }
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lswx_64, MEMSUFFIX) (void)
-{
- /* Note: T1 comes from xer_bc then no cast is needed */
- if (likely(T1 != 0)) {
- if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
- (PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
- raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL |
- POWERPC_EXCP_INVAL_LSWX);
- } else {
- glue(do_lsw_64, MEMSUFFIX)(PARAM1);
- }
- }
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
-{
- glue(do_stsw, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
-{
- glue(do_stsw_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-/*** Floating-point store ***/
-#define PPC_STF_OP(name, op) \
-void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \
-{ \
- glue(op, MEMSUFFIX)((uint32_t)T0, FT0); \
- RETURN(); \
-}
-
-#if defined(TARGET_PPC64)
-#define PPC_STF_OP_64(name, op) \
-void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \
-{ \
- glue(op, MEMSUFFIX)((uint64_t)T0, FT0); \
- RETURN(); \
-}
-#endif
-
-static always_inline void glue(stfs, MEMSUFFIX) (target_ulong EA, float64 d)
-{
- glue(stfl, MEMSUFFIX)(EA, float64_to_float32(d, &env->fp_status));
-}
-
-static always_inline void glue(stfiw, MEMSUFFIX) (target_ulong EA, float64 d)
-{
- CPU_DoubleU u;
-
- /* Store the low order 32 bits without any conversion */
- u.d = d;
- glue(st32, MEMSUFFIX)(EA, u.l.lower);
-}
-
-PPC_STF_OP(fd, stfq);
-PPC_STF_OP(fs, stfs);
-PPC_STF_OP(fiw, stfiw);
-#if defined(TARGET_PPC64)
-PPC_STF_OP_64(fd, stfq);
-PPC_STF_OP_64(fs, stfs);
-PPC_STF_OP_64(fiw, stfiw);
-#endif
-
-static always_inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, float64 d)
-{
- CPU_DoubleU u;
-
- u.d = d;
- u.ll = bswap64(u.ll);
- glue(stfq, MEMSUFFIX)(EA, u.d);
-}
-
-static always_inline void glue(stfsr, MEMSUFFIX) (target_ulong EA, float64 d)
-{
- CPU_FloatU u;
-
- u.f = float64_to_float32(d, &env->fp_status);
- u.l = bswap32(u.l);
- glue(stfl, MEMSUFFIX)(EA, u.f);
-}
-
-static always_inline void glue(stfiwr, MEMSUFFIX) (target_ulong EA, float64 d)
-{
- CPU_DoubleU u;
-
- /* Store the low order 32 bits without any conversion */
- u.d = d;
- u.l.lower = bswap32(u.l.lower);
- glue(st32, MEMSUFFIX)(EA, u.l.lower);
-}
-
-PPC_STF_OP(fd_le, stfqr);
-PPC_STF_OP(fs_le, stfsr);
-PPC_STF_OP(fiw_le, stfiwr);
-#if defined(TARGET_PPC64)
-PPC_STF_OP_64(fd_le, stfqr);
-PPC_STF_OP_64(fs_le, stfsr);
-PPC_STF_OP_64(fiw_le, stfiwr);
-#endif
-
-/*** Floating-point load ***/
-#define PPC_LDF_OP(name, op) \
-void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \
-{ \
- FT0 = glue(op, MEMSUFFIX)((uint32_t)T0); \
- RETURN(); \
-}
-
-#if defined(TARGET_PPC64)
-#define PPC_LDF_OP_64(name, op) \
-void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \
-{ \
- FT0 = glue(op, MEMSUFFIX)((uint64_t)T0); \
- RETURN(); \
-}
-#endif
-
-static always_inline float64 glue(ldfs, MEMSUFFIX) (target_ulong EA)
-{
- return float32_to_float64(glue(ldfl, MEMSUFFIX)(EA), &env->fp_status);
-}
-
-PPC_LDF_OP(fd, ldfq);
-PPC_LDF_OP(fs, ldfs);
-#if defined(TARGET_PPC64)
-PPC_LDF_OP_64(fd, ldfq);
-PPC_LDF_OP_64(fs, ldfs);
-#endif
-
-static always_inline float64 glue(ldfqr, MEMSUFFIX) (target_ulong EA)
-{
- CPU_DoubleU u;
-
- u.d = glue(ldfq, MEMSUFFIX)(EA);
- u.ll = bswap64(u.ll);
-
- return u.d;
-}
-
-static always_inline float64 glue(ldfsr, MEMSUFFIX) (target_ulong EA)
-{
- CPU_FloatU u;
-
- u.f = glue(ldfl, MEMSUFFIX)(EA);
- u.l = bswap32(u.l);
-
- return float32_to_float64(u.f, &env->fp_status);
-}
-
-PPC_LDF_OP(fd_le, ldfqr);
-PPC_LDF_OP(fs_le, ldfsr);
-#if defined(TARGET_PPC64)
-PPC_LDF_OP_64(fd_le, ldfqr);
-PPC_LDF_OP_64(fs_le, ldfsr);
-#endif
-
-/* Load and set reservation */
-void OPPROTO glue(op_lwarx, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
- env->reserve = (uint32_t)T0;
- }
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lwarx_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
- env->reserve = (uint64_t)T0;
- }
- RETURN();
-}
-
-void OPPROTO glue(op_ldarx, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu64, MEMSUFFIX)((uint32_t)T0);
- env->reserve = (uint32_t)T0;
- }
- RETURN();
-}
-
-void OPPROTO glue(op_ldarx_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu64, MEMSUFFIX)((uint64_t)T0);
- env->reserve = (uint64_t)T0;
- }
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_lwarx_le, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu32r, MEMSUFFIX)((uint32_t)T0);
- env->reserve = (uint32_t)T0;
- }
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lwarx_le_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu32r, MEMSUFFIX)((uint64_t)T0);
- env->reserve = (uint64_t)T0;
- }
- RETURN();
-}
-
-void OPPROTO glue(op_ldarx_le, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu64r, MEMSUFFIX)((uint32_t)T0);
- env->reserve = (uint32_t)T0;
- }
- RETURN();
-}
-
-void OPPROTO glue(op_ldarx_le_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- T1 = glue(ldu64r, MEMSUFFIX)((uint64_t)T0);
- env->reserve = (uint64_t)T0;
- }
- RETURN();
-}
-#endif
-
-/* Store with reservation */
-void OPPROTO glue(op_stwcx, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint32_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st32, MEMSUFFIX)((uint32_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stwcx_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint64_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st32, MEMSUFFIX)((uint64_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-void OPPROTO glue(op_stdcx, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint32_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st64, MEMSUFFIX)((uint32_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-void OPPROTO glue(op_stdcx_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint64_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st64, MEMSUFFIX)((uint64_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_stwcx_le, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint32_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stwcx_le_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint64_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-void OPPROTO glue(op_stdcx_le, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint32_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st64r, MEMSUFFIX)((uint32_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-
-void OPPROTO glue(op_stdcx_le_64, MEMSUFFIX) (void)
-{
- if (unlikely(T0 & 0x03)) {
- raise_exception(env, POWERPC_EXCP_ALIGN);
- } else {
- if (unlikely(env->reserve != (uint64_t)T0)) {
- env->crf[0] = xer_so;
- } else {
- glue(st64r, MEMSUFFIX)((uint64_t)T0, T1);
- env->crf[0] = xer_so | 0x02;
- }
- }
- env->reserve = (target_ulong)-1ULL;
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_dcbz_l32, MEMSUFFIX) (void)
-{
- T0 &= ~((uint32_t)31);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
- RETURN();
-}
-
-void OPPROTO glue(op_dcbz_l64, MEMSUFFIX) (void)
-{
- T0 &= ~((uint32_t)63);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
- RETURN();
-}
-
-void OPPROTO glue(op_dcbz_l128, MEMSUFFIX) (void)
-{
- T0 &= ~((uint32_t)127);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x40UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x44UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x48UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x4CUL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x50UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x54UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x58UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x5CUL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x60UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x64UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x68UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x6CUL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x70UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x74UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x78UL), 0);
- glue(st32, MEMSUFFIX)((uint32_t)(T0 + 0x7CUL), 0);
- RETURN();
-}
-
-void OPPROTO glue(op_dcbz, MEMSUFFIX) (void)
-{
- glue(do_dcbz, MEMSUFFIX)();
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_dcbz_l32_64, MEMSUFFIX) (void)
-{
- T0 &= ~((uint64_t)31);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
- RETURN();
-}
-
-void OPPROTO glue(op_dcbz_l64_64, MEMSUFFIX) (void)
-{
- T0 &= ~((uint64_t)63);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
- RETURN();
-}
-
-void OPPROTO glue(op_dcbz_l128_64, MEMSUFFIX) (void)
-{
- T0 &= ~((uint64_t)127);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x40UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x44UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x48UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x4CUL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x50UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x54UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x58UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x5CUL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x60UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x64UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x68UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x6CUL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x70UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x74UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x78UL), 0);
- glue(st32, MEMSUFFIX)((uint64_t)(T0 + 0x7CUL), 0);
- RETURN();
-}
-
-void OPPROTO glue(op_dcbz_64, MEMSUFFIX) (void)
-{
- glue(do_dcbz_64, MEMSUFFIX)();
- RETURN();
-}
-#endif
-
-/* Instruction cache block invalidate */
-void OPPROTO glue(op_icbi, MEMSUFFIX) (void)
-{
- glue(do_icbi, MEMSUFFIX)();
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_icbi_64, MEMSUFFIX) (void)
-{
- glue(do_icbi_64, MEMSUFFIX)();
- RETURN();
-}
-#endif
-
-/* External access */
-void OPPROTO glue(op_eciwx, MEMSUFFIX) (void)
-{
- T1 = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_eciwx_64, MEMSUFFIX) (void)
-{
- T1 = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_ecowx, MEMSUFFIX) (void)
-{
- glue(st32, MEMSUFFIX)((uint32_t)T0, T1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_ecowx_64, MEMSUFFIX) (void)
-{
- glue(st32, MEMSUFFIX)((uint64_t)T0, T1);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_eciwx_le, MEMSUFFIX) (void)
-{
- T1 = glue(ldu32r, MEMSUFFIX)((uint32_t)T0);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_eciwx_le_64, MEMSUFFIX) (void)
-{
- T1 = glue(ldu32r, MEMSUFFIX)((uint64_t)T0);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_ecowx_le, MEMSUFFIX) (void)
-{
- glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_ecowx_le_64, MEMSUFFIX) (void)
-{
- glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
- RETURN();
-}
-#endif
-
-/* XXX: those micro-ops need tests ! */
-/* PowerPC 601 specific instructions (POWER bridge) */
-void OPPROTO glue(op_POWER_lscbx, MEMSUFFIX) (void)
-{
- /* When byte count is 0, do nothing */
- if (likely(T1 != 0)) {
- glue(do_POWER_lscbx, MEMSUFFIX)(PARAM1, PARAM2, PARAM3);
- }
- RETURN();
-}
-
-/* POWER2 quad load and store */
-/* XXX: TAGs are not managed */
-void OPPROTO glue(op_POWER2_lfq, MEMSUFFIX) (void)
-{
- glue(do_POWER2_lfq, MEMSUFFIX)();
- RETURN();
-}
-
-void glue(op_POWER2_lfq_le, MEMSUFFIX) (void)
-{
- glue(do_POWER2_lfq_le, MEMSUFFIX)();
- RETURN();
-}
-
-void OPPROTO glue(op_POWER2_stfq, MEMSUFFIX) (void)
-{
- glue(do_POWER2_stfq, MEMSUFFIX)();
- RETURN();
-}
-
-void OPPROTO glue(op_POWER2_stfq_le, MEMSUFFIX) (void)
-{
- glue(do_POWER2_stfq_le, MEMSUFFIX)();
- RETURN();
-}
-
-/* Altivec vector extension */
-#if defined(WORDS_BIGENDIAN)
-#define VR_DWORD0 0
-#define VR_DWORD1 1
-#else
-#define VR_DWORD0 1
-#define VR_DWORD1 0
-#endif
-void OPPROTO glue(op_vr_lvx, MEMSUFFIX) (void)
-{
- AVR0.u64[VR_DWORD0] = glue(ldu64, MEMSUFFIX)((uint32_t)T0);
- AVR0.u64[VR_DWORD1] = glue(ldu64, MEMSUFFIX)((uint32_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_lvx_le, MEMSUFFIX) (void)
-{
- AVR0.u64[VR_DWORD1] = glue(ldu64r, MEMSUFFIX)((uint32_t)T0);
- AVR0.u64[VR_DWORD0] = glue(ldu64r, MEMSUFFIX)((uint32_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_stvx, MEMSUFFIX) (void)
-{
- glue(st64, MEMSUFFIX)((uint32_t)T0, AVR0.u64[VR_DWORD0]);
- glue(st64, MEMSUFFIX)((uint32_t)T0 + 8, AVR0.u64[VR_DWORD1]);
-}
-
-void OPPROTO glue(op_vr_stvx_le, MEMSUFFIX) (void)
-{
- glue(st64r, MEMSUFFIX)((uint32_t)T0, AVR0.u64[VR_DWORD1]);
- glue(st64r, MEMSUFFIX)((uint32_t)T0 + 8, AVR0.u64[VR_DWORD0]);
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_vr_lvx_64, MEMSUFFIX) (void)
-{
- AVR0.u64[VR_DWORD0] = glue(ldu64, MEMSUFFIX)((uint64_t)T0);
- AVR0.u64[VR_DWORD1] = glue(ldu64, MEMSUFFIX)((uint64_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_lvx_le_64, MEMSUFFIX) (void)
-{
- AVR0.u64[VR_DWORD1] = glue(ldu64r, MEMSUFFIX)((uint64_t)T0);
- AVR0.u64[VR_DWORD0] = glue(ldu64r, MEMSUFFIX)((uint64_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_stvx_64, MEMSUFFIX) (void)
-{
- glue(st64, MEMSUFFIX)((uint64_t)T0, AVR0.u64[VR_DWORD0]);
- glue(st64, MEMSUFFIX)((uint64_t)T0 + 8, AVR0.u64[VR_DWORD1]);
-}
-
-void OPPROTO glue(op_vr_stvx_le_64, MEMSUFFIX) (void)
-{
- glue(st64r, MEMSUFFIX)((uint64_t)T0, AVR0.u64[VR_DWORD1]);
- glue(st64r, MEMSUFFIX)((uint64_t)T0 + 8, AVR0.u64[VR_DWORD0]);
-}
-#endif
-#undef VR_DWORD0
-#undef VR_DWORD1
-
-/* SPE extension */
-#define _PPC_SPE_LD_OP(name, op) \
-void OPPROTO glue(glue(op_spe_l, name), MEMSUFFIX) (void) \
-{ \
- T1_64 = glue(op, MEMSUFFIX)((uint32_t)T0); \
- RETURN(); \
-}
-
-#if defined(TARGET_PPC64)
-#define _PPC_SPE_LD_OP_64(name, op) \
-void OPPROTO glue(glue(glue(op_spe_l, name), _64), MEMSUFFIX) (void) \
-{ \
- T1_64 = glue(op, MEMSUFFIX)((uint64_t)T0); \
- RETURN(); \
-}
-#define PPC_SPE_LD_OP(name, op) \
-_PPC_SPE_LD_OP(name, op); \
-_PPC_SPE_LD_OP_64(name, op)
-#else
-#define PPC_SPE_LD_OP(name, op) \
-_PPC_SPE_LD_OP(name, op)
-#endif
-
-#define _PPC_SPE_ST_OP(name, op) \
-void OPPROTO glue(glue(op_spe_st, name), MEMSUFFIX) (void) \
-{ \
- glue(op, MEMSUFFIX)((uint32_t)T0, T1_64); \
- RETURN(); \
-}
-
-#if defined(TARGET_PPC64)
-#define _PPC_SPE_ST_OP_64(name, op) \
-void OPPROTO glue(glue(glue(op_spe_st, name), _64), MEMSUFFIX) (void) \
-{ \
- glue(op, MEMSUFFIX)((uint64_t)T0, T1_64); \
- RETURN(); \
-}
-#define PPC_SPE_ST_OP(name, op) \
-_PPC_SPE_ST_OP(name, op); \
-_PPC_SPE_ST_OP_64(name, op)
-#else
-#define PPC_SPE_ST_OP(name, op) \
-_PPC_SPE_ST_OP(name, op)
-#endif
-
-PPC_SPE_LD_OP(dd, ldu64);
-PPC_SPE_ST_OP(dd, st64);
-PPC_SPE_LD_OP(dd_le, ldu64r);
-PPC_SPE_ST_OP(dd_le, st64r);
-static always_inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu32, MEMSUFFIX)(EA) << 32;
- ret |= (uint64_t)glue(ldu32, MEMSUFFIX)(EA + 4);
- return ret;
-}
-PPC_SPE_LD_OP(dw, spe_ldw);
-static always_inline void glue(spe_stdw, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st32, MEMSUFFIX)(EA, data >> 32);
- glue(st32, MEMSUFFIX)(EA + 4, data);
-}
-PPC_SPE_ST_OP(dw, spe_stdw);
-static always_inline uint64_t glue(spe_ldw_le, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu32r, MEMSUFFIX)(EA) << 32;
- ret |= (uint64_t)glue(ldu32r, MEMSUFFIX)(EA + 4);
- return ret;
-}
-PPC_SPE_LD_OP(dw_le, spe_ldw_le);
-static always_inline void glue(spe_stdw_le, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st32r, MEMSUFFIX)(EA, data >> 32);
- glue(st32r, MEMSUFFIX)(EA + 4, data);
-}
-PPC_SPE_ST_OP(dw_le, spe_stdw_le);
-static always_inline uint64_t glue(spe_ldh, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 48;
- ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2) << 32;
- ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 4) << 16;
- ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 6);
- return ret;
-}
-PPC_SPE_LD_OP(dh, spe_ldh);
-static always_inline void glue(spe_stdh, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st16, MEMSUFFIX)(EA, data >> 48);
- glue(st16, MEMSUFFIX)(EA + 2, data >> 32);
- glue(st16, MEMSUFFIX)(EA + 4, data >> 16);
- glue(st16, MEMSUFFIX)(EA + 6, data);
-}
-PPC_SPE_ST_OP(dh, spe_stdh);
-static always_inline uint64_t glue(spe_ldh_le, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 48;
- ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2) << 32;
- ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 4) << 16;
- ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 6);
- return ret;
-}
-PPC_SPE_LD_OP(dh_le, spe_ldh_le);
-static always_inline void glue(spe_stdh_le, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st16r, MEMSUFFIX)(EA, data >> 48);
- glue(st16r, MEMSUFFIX)(EA + 2, data >> 32);
- glue(st16r, MEMSUFFIX)(EA + 4, data >> 16);
- glue(st16r, MEMSUFFIX)(EA + 6, data);
-}
-PPC_SPE_ST_OP(dh_le, spe_stdh_le);
-static always_inline uint64_t glue(spe_lwhe, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 48;
- ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2) << 16;
- return ret;
-}
-PPC_SPE_LD_OP(whe, spe_lwhe);
-static always_inline void glue(spe_stwhe, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st16, MEMSUFFIX)(EA, data >> 48);
- glue(st16, MEMSUFFIX)(EA + 2, data >> 16);
-}
-PPC_SPE_ST_OP(whe, spe_stwhe);
-static always_inline uint64_t glue(spe_lwhe_le, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 48;
- ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2) << 16;
- return ret;
-}
-PPC_SPE_LD_OP(whe_le, spe_lwhe_le);
-static always_inline void glue(spe_stwhe_le, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st16r, MEMSUFFIX)(EA, data >> 48);
- glue(st16r, MEMSUFFIX)(EA + 2, data >> 16);
-}
-PPC_SPE_ST_OP(whe_le, spe_stwhe_le);
-static always_inline uint64_t glue(spe_lwhou, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 32;
- ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2);
- return ret;
-}
-PPC_SPE_LD_OP(whou, spe_lwhou);
-static always_inline uint64_t glue(spe_lwhos, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = ((uint64_t)((int32_t)glue(lds16, MEMSUFFIX)(EA))) << 32;
- ret |= (uint64_t)((int32_t)glue(lds16, MEMSUFFIX)(EA + 2));
- return ret;
-}
-PPC_SPE_LD_OP(whos, spe_lwhos);
-static always_inline void glue(spe_stwho, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st16, MEMSUFFIX)(EA, data >> 32);
- glue(st16, MEMSUFFIX)(EA + 2, data);
-}
-PPC_SPE_ST_OP(who, spe_stwho);
-static always_inline uint64_t glue(spe_lwhou_le, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 32;
- ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2);
- return ret;
-}
-PPC_SPE_LD_OP(whou_le, spe_lwhou_le);
-static always_inline uint64_t glue(spe_lwhos_le, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- ret = ((uint64_t)((int32_t)glue(lds16r, MEMSUFFIX)(EA))) << 32;
- ret |= (uint64_t)((int32_t)glue(lds16r, MEMSUFFIX)(EA + 2));
- return ret;
-}
-PPC_SPE_LD_OP(whos_le, spe_lwhos_le);
-static always_inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st16r, MEMSUFFIX)(EA, data >> 32);
- glue(st16r, MEMSUFFIX)(EA + 2, data);
-}
-PPC_SPE_ST_OP(who_le, spe_stwho_le);
-static always_inline void glue(spe_stwwo, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st32, MEMSUFFIX)(EA, data);
-}
-PPC_SPE_ST_OP(wwo, spe_stwwo);
-static always_inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
-{
- glue(st32r, MEMSUFFIX)(EA, data);
-}
-PPC_SPE_ST_OP(wwo_le, spe_stwwo_le);
-static always_inline uint64_t glue(spe_lh, MEMSUFFIX) (target_ulong EA)
-{
- uint16_t tmp;
- tmp = glue(ldu16, MEMSUFFIX)(EA);
- return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
-}
-PPC_SPE_LD_OP(h, spe_lh);
-static always_inline uint64_t glue(spe_lh_le, MEMSUFFIX) (target_ulong EA)
-{
- uint16_t tmp;
- tmp = glue(ldu16r, MEMSUFFIX)(EA);
- return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
-}
-PPC_SPE_LD_OP(h_le, spe_lh_le);
-static always_inline uint64_t glue(spe_lwwsplat, MEMSUFFIX) (target_ulong EA)
-{
- uint32_t tmp;
- tmp = glue(ldu32, MEMSUFFIX)(EA);
- return ((uint64_t)tmp << 32) | (uint64_t)tmp;
-}
-PPC_SPE_LD_OP(wwsplat, spe_lwwsplat);
-static always_inline
-uint64_t glue(spe_lwwsplat_le, MEMSUFFIX) (target_ulong EA)
-{
- uint32_t tmp;
- tmp = glue(ldu32r, MEMSUFFIX)(EA);
- return ((uint64_t)tmp << 32) | (uint64_t)tmp;
-}
-PPC_SPE_LD_OP(wwsplat_le, spe_lwwsplat_le);
-static always_inline uint64_t glue(spe_lwhsplat, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- uint16_t tmp;
- tmp = glue(ldu16, MEMSUFFIX)(EA);
- ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
- tmp = glue(ldu16, MEMSUFFIX)(EA + 2);
- ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
- return ret;
-}
-PPC_SPE_LD_OP(whsplat, spe_lwhsplat);
-static always_inline
-uint64_t glue(spe_lwhsplat_le, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t ret;
- uint16_t tmp;
- tmp = glue(ldu16r, MEMSUFFIX)(EA);
- ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
- tmp = glue(ldu16r, MEMSUFFIX)(EA + 2);
- ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
- return ret;
-}
-PPC_SPE_LD_OP(whsplat_le, spe_lwhsplat_le);
-
-#undef MEMSUFFIX
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index aaec6d8e1..fe58d7acd 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -66,17 +66,12 @@ static TCGv cpu_nip;
static TCGv cpu_ctr;
static TCGv cpu_lr;
static TCGv cpu_xer;
+static TCGv cpu_reserve;
static TCGv_i32 cpu_fpscr;
+static TCGv_i32 cpu_access_type;
/* dyngen register indexes */
static TCGv cpu_T[3];
-#if defined(TARGET_PPC64)
-#define cpu_T64 cpu_T
-#else
-static TCGv_i64 cpu_T64[3];
-#endif
-static TCGv_i64 cpu_FT[2];
-static TCGv_i64 cpu_AVRh[3], cpu_AVRl[3];
#include "gen-icount.h"
@@ -107,32 +102,6 @@ void ppc_translate_init(void)
cpu_T[2] = tcg_global_reg_new(TCG_AREG3, "T2");
#endif
#endif
-#if !defined(TARGET_PPC64)
- cpu_T64[0] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t0_64),
- "T0_64");
- cpu_T64[1] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t1_64),
- "T1_64");
- cpu_T64[2] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t2_64),
- "T2_64");
-#endif
-
- cpu_FT[0] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, ft0), "FT0");
- cpu_FT[1] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, ft1), "FT1");
-
- cpu_AVRh[0] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr0.u64[0]), "AVR0H");
- cpu_AVRl[0] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr0.u64[1]), "AVR0L");
- cpu_AVRh[1] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr1.u64[0]), "AVR1H");
- cpu_AVRl[1] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr1.u64[1]), "AVR1L");
- cpu_AVRh[2] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr2.u64[0]), "AVR2H");
- cpu_AVRl[2] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr2.u64[1]), "AVR2L");
p = cpu_reg_names;
@@ -161,13 +130,23 @@ void ppc_translate_init(void)
p += (i < 10) ? 4 : 5;
sprintf(p, "avr%dH", i);
+#ifdef WORDS_BIGENDIAN
cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr[i].u64[0]), p);
+ offsetof(CPUState, avr[i].u64[0]), p);
+#else
+ cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, avr[i].u64[1]), p);
+#endif
p += (i < 10) ? 6 : 7;
sprintf(p, "avr%dL", i);
+#ifdef WORDS_BIGENDIAN
+ cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUState, avr[i].u64[1]), p);
+#else
cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUState, avr[i].u64[1]), p);
+ offsetof(CPUState, avr[i].u64[0]), p);
+#endif
p += (i < 10) ? 6 : 7;
}
@@ -183,9 +162,15 @@ void ppc_translate_init(void)
cpu_xer = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, xer), "xer");
+ cpu_reserve = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUState, reserve), "reserve");
+
cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUState, fpscr), "fpscr");
+ cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, access_type), "access_type");
+
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
@@ -218,7 +203,6 @@ typedef struct DisasContext {
int spe_enabled;
ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
int singlestep_enabled;
- int dcache_line_size;
} DisasContext;
struct opc_handler_t {
@@ -281,6 +265,11 @@ static always_inline void gen_optimize_fprf (void)
#endif
}
+static always_inline void gen_set_access_type(int access_type)
+{
+ tcg_gen_movi_i32(cpu_access_type, access_type);
+}
+
static always_inline void gen_update_nip (DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
@@ -2483,49 +2472,25 @@ static always_inline void gen_addr_register (TCGv EA,
tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
}
-#if defined(TARGET_PPC64)
-#define _GEN_MEM_FUNCS(name, mode) \
- &gen_op_##name##_##mode, \
- &gen_op_##name##_le_##mode, \
- &gen_op_##name##_64_##mode, \
- &gen_op_##name##_le_64_##mode
-#else
-#define _GEN_MEM_FUNCS(name, mode) \
- &gen_op_##name##_##mode, \
- &gen_op_##name##_le_##mode
-#endif
-#if defined(CONFIG_USER_ONLY)
-#if defined(TARGET_PPC64)
-#define NB_MEM_FUNCS 4
-#else
-#define NB_MEM_FUNCS 2
-#endif
-#define GEN_MEM_FUNCS(name) \
- _GEN_MEM_FUNCS(name, raw)
-#else
-#if defined(TARGET_PPC64)
-#define NB_MEM_FUNCS 12
-#else
-#define NB_MEM_FUNCS 6
-#endif
-#define GEN_MEM_FUNCS(name) \
- _GEN_MEM_FUNCS(name, user), \
- _GEN_MEM_FUNCS(name, kernel), \
- _GEN_MEM_FUNCS(name, hypv)
-#endif
+static always_inline void gen_check_align (DisasContext *ctx, TCGv EA, int mask)
+{
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1, t2;
+    /* NIP cannot be restored if the memory exception comes from a helper, so update it here */
+ gen_update_nip(ctx, ctx->nip - 4);
+ tcg_gen_andi_tl(t0, EA, mask);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
+ t2 = tcg_const_i32(0);
+ gen_helper_raise_exception_err(t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+}
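For context, gen_check_align emits an inline alignment test: it masks the effective address, branches past the exception path when the masked bits are zero, and otherwise calls the raise-exception helper with POWERPC_EXCP_ALIGN (which is why the NIP is updated first). A hypothetical call site for a word-sized reserved load might look like the fragment below; the access-type constant and the surrounding sequence are assumptions for illustration, not taken from this patch, and the fragment is not runnable on its own:

    /* inside a gen_* handler, after decoding the opcode */
    TCGv t0 = tcg_temp_new();
    gen_set_access_type(ACCESS_RES);   /* assumed access type for a lwarx-style op */
    gen_addr_reg_index(t0, ctx);       /* EA = (rA|0) + rB */
    gen_check_align(ctx, t0, 0x03);    /* raise POWERPC_EXCP_ALIGN if EA is not word aligned */
    gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
    tcg_gen_mov_tl(cpu_reserve, t0);   /* remember the reservation address */
    tcg_temp_free(t0);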
/*** Integer load ***/
-#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
-#define OP_LD_TABLE(width) \
-static GenOpFunc *gen_op_l##width[NB_MEM_FUNCS] = { \
- GEN_MEM_FUNCS(l##width), \
-};
-#define OP_ST_TABLE(width) \
-static GenOpFunc *gen_op_st##width[NB_MEM_FUNCS] = { \
- GEN_MEM_FUNCS(st##width), \
-};
-
-
#if defined(TARGET_PPC64)
#define GEN_QEMU_LD_PPC64(width) \
static always_inline void gen_qemu_ld##width##_ppc64(TCGv t0, TCGv t1, int flags)\
@@ -2691,10 +2656,10 @@ static always_inline void gen_qemu_st64(TCGv arg0, TCGv arg1, int flags)
#else /* defined(TARGET_PPC64) */
-#define GEN_QEMU_LD_PPC32(width) \
-static always_inline void gen_qemu_ld##width##_ppc32(TCGv arg0, TCGv arg1, int flags)\
-{ \
- tcg_gen_qemu_ld##width(arg0, arg1, flags >> 1); \
+#define GEN_QEMU_LD_PPC32(width) \
+static always_inline void gen_qemu_ld##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
+{ \
+ tcg_gen_qemu_ld##width(arg0, arg1, flags >> 1); \
}
GEN_QEMU_LD_PPC32(8u)
GEN_QEMU_LD_PPC32(8s)
@@ -2702,15 +2667,23 @@ GEN_QEMU_LD_PPC32(16u)
GEN_QEMU_LD_PPC32(16s)
GEN_QEMU_LD_PPC32(32u)
GEN_QEMU_LD_PPC32(32s)
+static always_inline void gen_qemu_ld64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
+{
+ tcg_gen_qemu_ld64(arg0, arg1, flags >> 1);
+}
-#define GEN_QEMU_ST_PPC32(width) \
-static always_inline void gen_qemu_st##width##_ppc32(TCGv arg0, TCGv arg1, int flags)\
-{ \
- tcg_gen_qemu_st##width(arg0, arg1, flags >> 1); \
+#define GEN_QEMU_ST_PPC32(width) \
+static always_inline void gen_qemu_st##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
+{ \
+ tcg_gen_qemu_st##width(arg0, arg1, flags >> 1); \
}
GEN_QEMU_ST_PPC32(8)
GEN_QEMU_ST_PPC32(16)
GEN_QEMU_ST_PPC32(32)
+static always_inline void gen_qemu_st64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
+{
+ tcg_gen_qemu_st64(arg0, arg1, flags >> 1);
+}
static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
{
@@ -2746,6 +2719,13 @@ static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
tcg_gen_bswap_i32(arg0, arg0);
}
+static always_inline void gen_qemu_ld64(TCGv_i64 arg0, TCGv arg1, int flags)
+{
+ gen_qemu_ld64_ppc32(arg0, arg1, flags);
+ if (unlikely(flags & 1))
+ tcg_gen_bswap_i64(arg0, arg0);
+}
+
static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
{
gen_qemu_st8_ppc32(arg0, arg1, flags);
@@ -2774,19 +2754,30 @@ static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
gen_qemu_st32_ppc32(arg0, arg1, flags);
}
+static always_inline void gen_qemu_st64(TCGv_i64 arg0, TCGv arg1, int flags)
+{
+ if (unlikely(flags & 1)) {
+ TCGv_i64 temp = tcg_temp_new_i64();
+ tcg_gen_bswap_i64(temp, arg0);
+ gen_qemu_st64_ppc32(temp, arg1, flags);
+ tcg_temp_free_i64(temp);
+ } else
+ gen_qemu_st64_ppc32(arg0, arg1, flags);
+}
#endif
-#define GEN_LD(width, opc, type) \
-GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_LD(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
- TCGv EA = tcg_temp_new(); \
+ TCGv EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
-#define GEN_LDU(width, opc, type) \
-GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_LDU(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0 || \
@@ -2794,18 +2785,19 @@ GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- EA = tcg_temp_new(); \
+ EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
if (type == PPC_64B) \
gen_addr_imm_index(EA, ctx, 0x03); \
else \
gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
-#define GEN_LDUX(width, opc2, opc3, type) \
-GEN_HANDLER(l##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \
+#define GEN_LDUX(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0 || \
@@ -2813,45 +2805,47 @@ GEN_HANDLER(l##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- EA = tcg_temp_new(); \
+ EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
gen_addr_reg_index(EA, ctx); \
- gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
-#define GEN_LDX(width, opc2, opc3, type) \
-GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
+#define GEN_LDX(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
- TCGv EA = tcg_temp_new(); \
+ TCGv EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
gen_addr_reg_index(EA, ctx); \
- gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
-#define GEN_LDS(width, op, type) \
-GEN_LD(width, op | 0x20, type); \
-GEN_LDU(width, op | 0x21, type); \
-GEN_LDUX(width, 0x17, op | 0x01, type); \
-GEN_LDX(width, 0x17, op | 0x00, type)
+#define GEN_LDS(name, ldop, op, type) \
+GEN_LD(name, ldop, op | 0x20, type); \
+GEN_LDU(name, ldop, op | 0x21, type); \
+GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
+GEN_LDX(name, ldop, 0x17, op | 0x00, type)
/* lbz lbzu lbzux lbzx */
-GEN_LDS(8u, 0x02, PPC_INTEGER);
+GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
-GEN_LDS(16s, 0x0A, PPC_INTEGER);
+GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
-GEN_LDS(16u, 0x08, PPC_INTEGER);
+GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
-GEN_LDS(32u, 0x00, PPC_INTEGER);
+GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* lwaux */
-GEN_LDUX(32s, 0x15, 0x0B, PPC_64B);
+GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
-GEN_LDX(32s, 0x15, 0x0A, PPC_64B);
+GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
-GEN_LDUX(64, 0x15, 0x01, PPC_64B);
+GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
/* ldx */
-GEN_LDX(64, 0x15, 0x00, PPC_64B);
+GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B)
{
TCGv EA;
@@ -2863,6 +2857,7 @@ GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B)
}
}
EA = tcg_temp_new();
+ gen_set_access_type(ACCESS_INT);
gen_addr_imm_index(EA, ctx, 0x03);
if (ctx->opcode & 0x02) {
/* lwa (lwau is undefined) */
@@ -2901,6 +2896,7 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX)
return;
}
EA = tcg_temp_new();
+ gen_set_access_type(ACCESS_INT);
gen_addr_imm_index(EA, ctx, 0x0F);
gen_qemu_ld64(cpu_gpr[rd], EA, ctx->mem_idx);
tcg_gen_addi_tl(EA, EA, 8);
@@ -2911,72 +2907,76 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX)
#endif
/*** Integer store ***/
-#define GEN_ST(width, opc, type) \
-GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_ST(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
- TCGv EA = tcg_temp_new(); \
+ TCGv EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
-#define GEN_STU(width, opc, type) \
-GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_STU(name, stop, opc, type) \
+GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0)) { \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- EA = tcg_temp_new(); \
+ EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
if (type == PPC_64B) \
gen_addr_imm_index(EA, ctx, 0x03); \
else \
gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
-#define GEN_STUX(width, opc2, opc3, type) \
-GEN_HANDLER(st##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \
+#define GEN_STUX(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0)) { \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- EA = tcg_temp_new(); \
+ EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
gen_addr_reg_index(EA, ctx); \
- gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
-#define GEN_STX(width, opc2, opc3, type) \
-GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
+#define GEN_STX(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
- TCGv EA = tcg_temp_new(); \
+ TCGv EA = tcg_temp_new(); \
+ gen_set_access_type(ACCESS_INT); \
gen_addr_reg_index(EA, ctx); \
- gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
tcg_temp_free(EA); \
}
-#define GEN_STS(width, op, type) \
-GEN_ST(width, op | 0x20, type); \
-GEN_STU(width, op | 0x21, type); \
-GEN_STUX(width, 0x17, op | 0x01, type); \
-GEN_STX(width, 0x17, op | 0x00, type)
+#define GEN_STS(name, stop, op, type) \
+GEN_ST(name, stop, op | 0x20, type); \
+GEN_STU(name, stop, op | 0x21, type); \
+GEN_STUX(name, stop, 0x17, op | 0x01, type); \
+GEN_STX(name, stop, 0x17, op | 0x00, type)
/* stb stbu stbux stbx */
-GEN_STS(8, 0x06, PPC_INTEGER);
+GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
-GEN_STS(16, 0x0C, PPC_INTEGER);
+GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
-GEN_STS(32, 0x04, PPC_INTEGER);
+GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
-GEN_STUX(64, 0x15, 0x05, PPC_64B);
-GEN_STX(64, 0x15, 0x04, PPC_64B);
+GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B)
{
int rs;
@@ -3002,6 +3002,7 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B)
return;
}
EA = tcg_temp_new();
+ gen_set_access_type(ACCESS_INT);
gen_addr_imm_index(EA, ctx, 0x03);
gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
tcg_gen_addi_tl(EA, EA, 8);
@@ -3017,6 +3018,7 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B)
}
}
EA = tcg_temp_new();
+ gen_set_access_type(ACCESS_INT);
gen_addr_imm_index(EA, ctx, 0x03);
gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
if (Rc(ctx->opcode))
@@ -3036,7 +3038,7 @@ void always_inline gen_qemu_ld16ur(TCGv t0, TCGv t1, int flags)
tcg_gen_extu_i32_tl(t0, temp);
tcg_temp_free_i32(temp);
}
-GEN_LDX(16ur, 0x16, 0x18, PPC_INTEGER);
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
void always_inline gen_qemu_ld32ur(TCGv t0, TCGv t1, int flags)
@@ -3048,7 +3050,7 @@ void always_inline gen_qemu_ld32ur(TCGv t0, TCGv t1, int flags)
tcg_gen_extu_i32_tl(t0, temp);
tcg_temp_free_i32(temp);
}
-GEN_LDX(32ur, 0x16, 0x10, PPC_INTEGER);
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
/* sthbrx */
void always_inline gen_qemu_st16r(TCGv t0, TCGv t1, int flags)
@@ -3063,7 +3065,7 @@ void always_inline gen_qemu_st16r(TCGv t0, TCGv t1, int flags)
gen_qemu_st16(t2, t1, flags);
tcg_temp_free(t2);
}
-GEN_STX(16r, 0x16, 0x1C, PPC_INTEGER);
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
void always_inline gen_qemu_st32r(TCGv t0, TCGv t1, int flags)
@@ -3077,73 +3079,36 @@ void always_inline gen_qemu_st32r(TCGv t0, TCGv t1, int flags)
gen_qemu_st32(t2, t1, flags);
tcg_temp_free(t2);
}
-GEN_STX(32r, 0x16, 0x14, PPC_INTEGER);
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
/*** Integer load and store multiple ***/
-#define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg)
-static GenOpFunc1 *gen_op_lmw[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(lmw),
-};
-static GenOpFunc1 *gen_op_stmw[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(stmw),
-};
-
/* lmw */
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
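+    /* The lmw helper loads GPRs rD through r31 from consecutive words starting at the effective address in t0 */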
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(cpu_T[0], ctx, 0);
- op_ldstm(lmw, rD(ctx->opcode));
+ gen_addr_imm_index(t0, ctx, 0);
+ gen_helper_lmw(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
}
/* stmw */
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rS(ctx->opcode));
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(cpu_T[0], ctx, 0);
- op_ldstm(stmw, rS(ctx->opcode));
+ gen_addr_imm_index(t0, ctx, 0);
+ gen_helper_stmw(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
}
/*** Integer load and store strings ***/
-#define op_ldsts(name, start) (*gen_op_##name[ctx->mem_idx])(start)
-#define op_ldstsx(name, rd, ra, rb) (*gen_op_##name[ctx->mem_idx])(rd, ra, rb)
-/* string load & stores are by definition endian-safe */
-#define gen_op_lswi_le_raw gen_op_lswi_raw
-#define gen_op_lswi_le_user gen_op_lswi_user
-#define gen_op_lswi_le_kernel gen_op_lswi_kernel
-#define gen_op_lswi_le_hypv gen_op_lswi_hypv
-#define gen_op_lswi_le_64_raw gen_op_lswi_raw
-#define gen_op_lswi_le_64_user gen_op_lswi_user
-#define gen_op_lswi_le_64_kernel gen_op_lswi_kernel
-#define gen_op_lswi_le_64_hypv gen_op_lswi_hypv
-static GenOpFunc1 *gen_op_lswi[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(lswi),
-};
-#define gen_op_lswx_le_raw gen_op_lswx_raw
-#define gen_op_lswx_le_user gen_op_lswx_user
-#define gen_op_lswx_le_kernel gen_op_lswx_kernel
-#define gen_op_lswx_le_hypv gen_op_lswx_hypv
-#define gen_op_lswx_le_64_raw gen_op_lswx_raw
-#define gen_op_lswx_le_64_user gen_op_lswx_user
-#define gen_op_lswx_le_64_kernel gen_op_lswx_kernel
-#define gen_op_lswx_le_64_hypv gen_op_lswx_hypv
-static GenOpFunc3 *gen_op_lswx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(lswx),
-};
-#define gen_op_stsw_le_raw gen_op_stsw_raw
-#define gen_op_stsw_le_user gen_op_stsw_user
-#define gen_op_stsw_le_kernel gen_op_stsw_kernel
-#define gen_op_stsw_le_hypv gen_op_stsw_hypv
-#define gen_op_stsw_le_64_raw gen_op_stsw_raw
-#define gen_op_stsw_le_64_user gen_op_stsw_user
-#define gen_op_stsw_le_64_kernel gen_op_stsw_kernel
-#define gen_op_stsw_le_64_hypv gen_op_stsw_hypv
-static GenOpFunc1 *gen_op_stsw[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(stsw),
-};
-
/* lswi */
/* PowerPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
@@ -3152,6 +3117,8 @@ static GenOpFunc1 *gen_op_stsw[NB_MEM_FUNCS] = {
*/
GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING)
{
+ TCGv t0;
+ TCGv_i32 t1, t2;
int nb = NB(ctx->opcode);
int start = rD(ctx->opcode);
int ra = rA(ctx->opcode);
@@ -3169,49 +3136,67 @@ GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING)
}
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_register(cpu_T[0], ctx);
- tcg_gen_movi_tl(cpu_T[1], nb);
- op_ldsts(lswi, start);
+ t0 = tcg_temp_new();
+ gen_addr_register(t0, ctx);
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(start);
+ gen_helper_lsw(t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
}
/* lswx */
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING)
{
- int ra = rA(ctx->opcode);
- int rb = rB(ctx->opcode);
-
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
+ TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- if (ra == 0) {
- ra = rb;
- }
- tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
- op_ldstsx(lswx, rD(ctx->opcode), ra, rb);
+ gen_addr_reg_index(t0, ctx);
+ gen_helper_lswx(t0, t1, t2, t3);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
}
/* stswi */
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING)
{
int nb = NB(ctx->opcode);
-
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1;
+ TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_register(cpu_T[0], ctx);
+ gen_addr_register(t0, ctx);
if (nb == 0)
nb = 32;
- tcg_gen_movi_tl(cpu_T[1], nb);
- op_ldsts(stsw, rS(ctx->opcode));
+ t1 = tcg_const_i32(nb);
+ gen_helper_stsw(t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
}
/* stswx */
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING)
{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
- op_ldsts(stsw, rS(ctx->opcode));
+ gen_addr_reg_index(t0, ctx);
+ tcg_gen_trunc_tl_i32(t1, cpu_xer);
+ tcg_gen_andi_i32(t1, t1, 0x7F);
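+    /* stswx takes its byte count from the low seven bits of XER */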
+ gen_helper_stsw(t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
}
/*** Memory synchronisation ***/
@@ -3226,63 +3211,79 @@ GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM)
GEN_STOP(ctx);
}
-#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
-#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
-static GenOpFunc *gen_op_lwarx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(lwarx),
-};
-static GenOpFunc *gen_op_stwcx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(stwcx),
-};
-
/* lwarx */
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000001, PPC_RES)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_lwarx();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]);
+ TCGv t0 = tcg_temp_local_new();
+ gen_set_access_type(ACCESS_RES);
+ gen_addr_reg_index(t0, ctx);
+ gen_check_align(ctx, t0, 0x03);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ tcg_gen_ext32u_tl(t0, t0);
+#endif
+ gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
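+    /* Remember the reservation address so a later stwcx. can check it */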
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_temp_free(t0);
}
/* stwcx. */
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- op_stwcx();
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ gen_set_access_type(ACCESS_RES);
+ gen_addr_reg_index(t0, ctx);
+ gen_check_align(ctx, t0, 0x03);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode)
+ tcg_gen_ext32u_tl(t0, t0);
+#endif
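+    /* CR0 starts from XER[SO]; the store and CR0[EQ] are performed only if the address still matches the reservation, and the reservation is cleared either way */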
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], t0, ctx->mem_idx);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free(t0);
}
#if defined(TARGET_PPC64)
-#define op_ldarx() (*gen_op_ldarx[ctx->mem_idx])()
-#define op_stdcx() (*gen_op_stdcx[ctx->mem_idx])()
-static GenOpFunc *gen_op_ldarx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(ldarx),
-};
-static GenOpFunc *gen_op_stdcx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(stdcx),
-};
-
/* ldarx */
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000001, PPC_64B)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_ldarx();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]);
+ TCGv t0 = tcg_temp_local_new();
+ gen_set_access_type(ACCESS_RES);
+ gen_addr_reg_index(t0, ctx);
+ gen_check_align(ctx, t0, 0x07);
+ if (!ctx->sf_mode)
+ tcg_gen_ext32u_tl(t0, t0);
+ gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_temp_free(t0);
}
/* stdcx. */
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- op_stdcx();
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ gen_set_access_type(ACCESS_RES);
+ gen_addr_reg_index(t0, ctx);
+ gen_check_align(ctx, t0, 0x07);
+ if (!ctx->sf_mode)
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
+ tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
+ tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ gen_qemu_st64(cpu_gpr[rS(ctx->opcode)], t0, ctx->mem_idx);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free(t0);
}
#endif /* defined(TARGET_PPC64) */
@@ -3294,27 +3295,33 @@ GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC)
/* wait */
GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT)
{
+    TCGv_i32 t0 = tcg_const_i32(1);
+    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
/* Stop translation, as the CPU is supposed to sleep from now */
- gen_op_wait();
GEN_EXCP(ctx, EXCP_HLT, 1);
}
/*** Floating-point load ***/
-#define GEN_LDF(width, opc, type) \
-GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_LDF(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_addr_imm_index(cpu_T[0], ctx, 0); \
- op_ldst(l##width); \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(EA, ctx, 0); \
+ gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_temp_free(EA); \
}
-#define GEN_LDUF(width, opc, type) \
-GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_LDUF(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3323,15 +3330,18 @@ GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_addr_imm_index(cpu_T[0], ctx, 0); \
- op_ldst(l##width); \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(EA, ctx, 0); \
+ gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
}
-#define GEN_LDUXF(width, opc, type) \
-GEN_HANDLER(l##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
+#define GEN_LDUXF(name, ldop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3340,52 +3350,71 @@ GEN_HANDLER(l##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_addr_reg_index(cpu_T[0], ctx); \
- op_ldst(l##width); \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(EA, ctx); \
+ gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
}
-#define GEN_LDXF(width, opc2, opc3, type) \
-GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_addr_reg_index(cpu_T[0], ctx); \
- op_ldst(l##width); \
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(EA, ctx); \
+ gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_temp_free(EA); \
}
-#define GEN_LDFS(width, op, type) \
-OP_LD_TABLE(width); \
-GEN_LDF(width, op | 0x20, type); \
-GEN_LDUF(width, op | 0x21, type); \
-GEN_LDUXF(width, op | 0x01, type); \
-GEN_LDXF(width, 0x17, op | 0x00, type)
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type); \
+GEN_LDUF(name, ldop, op | 0x21, type); \
+GEN_LDUXF(name, ldop, op | 0x01, type); \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+static always_inline void gen_qemu_ld32fs(TCGv_i64 arg1, TCGv arg2, int flags)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
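+    /* lfs loads a single-precision value and widens it to double precision for the 64-bit FPR */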
+ gen_qemu_ld32u(t0, arg2, flags);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ gen_helper_float32_to_float64(arg1, t1);
+ tcg_temp_free_i32(t1);
+}
-/* lfd lfdu lfdux lfdx */
-GEN_LDFS(fd, 0x12, PPC_FLOAT);
-/* lfs lfsu lfsux lfsx */
-GEN_LDFS(fs, 0x10, PPC_FLOAT);
+/* lfd lfdu lfdux lfdx */
+GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
+/* lfs lfsu lfsux lfsx */
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
/*** Floating-point store ***/
-#define GEN_STF(width, opc, type) \
-GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_STF(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_addr_imm_index(cpu_T[0], ctx, 0); \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
- op_ldst(st##width); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(EA, ctx, 0); \
+ gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_temp_free(EA); \
}
-#define GEN_STUF(width, opc, type) \
-GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
+#define GEN_STUF(name, stop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3394,15 +3423,18 @@ GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_addr_imm_index(cpu_T[0], ctx, 0); \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
- op_ldst(st##width); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(EA, ctx, 0); \
+ gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
}
-#define GEN_STUXF(width, opc, type) \
-GEN_HANDLER(st##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
+#define GEN_STUXF(name, stop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
@@ -3411,40 +3443,61 @@ GEN_HANDLER(st##width##ux, 0x1F, 0x17, opc, 0x00000001, type) \
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_addr_reg_index(cpu_T[0], ctx); \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
- op_ldst(st##width); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(EA, ctx); \
+ gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
}
-#define GEN_STXF(width, opc2, opc3, type) \
-GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, type) \
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->fpu_enabled)) { \
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_addr_reg_index(cpu_T[0], ctx); \
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]); \
- op_ldst(st##width); \
+ gen_set_access_type(ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(EA, ctx); \
+ gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_temp_free(EA); \
}
-#define GEN_STFS(width, op, type) \
-OP_ST_TABLE(width); \
-GEN_STF(width, op | 0x20, type); \
-GEN_STUF(width, op | 0x21, type); \
-GEN_STUXF(width, op | 0x01, type); \
-GEN_STXF(width, 0x17, op | 0x00, type)
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type); \
+GEN_STUF(name, stop, op | 0x21, type); \
+GEN_STUXF(name, stop, op | 0x01, type); \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+static always_inline void gen_qemu_st32fs(TCGv_i64 arg1, TCGv arg2, int flags)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new();
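+    /* stfs narrows the double-precision FPR back to single precision before storing 32 bits */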
+ gen_helper_float64_to_float32(t0, arg1);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ gen_qemu_st32(t1, arg2, flags);
+ tcg_temp_free(t1);
+}
/* stfd stfdu stfdux stfdx */
-GEN_STFS(fd, 0x16, PPC_FLOAT);
+GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx */
-GEN_STFS(fs, 0x14, PPC_FLOAT);
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
/* Optional: */
+static always_inline void gen_qemu_st32fiw(TCGv_i64 arg1, TCGv arg2, int flags)
+{
+ TCGv t0 = tcg_temp_new();
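+    /* stfiwx stores the low 32 bits of the FPR unchanged, with no floating-point conversion */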
+    tcg_gen_trunc_i64_tl(t0, arg1);
+ gen_qemu_st32(t0, arg2, flags);
+ tcg_temp_free(t0);
+}
/* stfiwx */
-OP_ST_TABLE(fiw);
-GEN_STXF(fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
/*** Branch ***/
static always_inline void gen_goto_tb (DisasContext *ctx, int n,
@@ -3679,7 +3732,7 @@ GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- gen_op_rfi();
+ gen_helper_rfi();
GEN_SYNC(ctx);
#endif
}
@@ -3695,7 +3748,7 @@ GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- gen_op_rfid();
+ gen_helper_rfid();
GEN_SYNC(ctx);
#endif
}
@@ -3710,7 +3763,7 @@ GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- gen_op_hrfid();
+ gen_helper_hrfid();
GEN_SYNC(ctx);
#endif
}
@@ -3734,42 +3787,46 @@ GEN_HANDLER(sc, 0x11, 0xFF, 0xFF, 0x03FFF01D, PPC_FLOW)
/* tw */
GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
+ TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
/* Update the nip since this might generate a trap exception */
gen_update_nip(ctx, ctx->nip);
- gen_op_tw(TO(ctx->opcode));
+ gen_helper_tw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
}
/* twi */
GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SIMM(ctx->opcode));
+ TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
/* Update the nip since this might generate a trap exception */
gen_update_nip(ctx, ctx->nip);
- gen_op_tw(TO(ctx->opcode));
+ gen_helper_tw(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
}
#if defined(TARGET_PPC64)
/* td */
GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]);
+ TCGv_i32 t0 = tcg_const_i32(TO(ctx->opcode));
/* Update the nip since this might generate a trap exception */
gen_update_nip(ctx, ctx->nip);
- gen_op_td(TO(ctx->opcode));
+ gen_helper_td(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
}
/* tdi */
GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B)
{
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_movi_tl(cpu_T[1], SIMM(ctx->opcode));
+ TCGv t0 = tcg_const_tl(SIMM(ctx->opcode));
+ TCGv_i32 t1 = tcg_const_i32(TO(ctx->opcode));
/* Update the nip since this might generate a trap exception */
gen_update_nip(ctx, ctx->nip);
- gen_op_td(TO(ctx->opcode));
+ gen_helper_td(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
}
#endif
@@ -4011,6 +4068,7 @@ GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE)
{
/* XXX: specification says this is treated as a load by the MMU */
TCGv t0 = tcg_temp_new();
+ gen_set_access_type(ACCESS_CACHE);
gen_addr_reg_index(t0, ctx);
gen_qemu_ld8u(t0, t0, ctx->mem_idx);
tcg_temp_free(t0);
@@ -4028,6 +4086,7 @@ GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE)
return;
}
EA = tcg_temp_new();
+ gen_set_access_type(ACCESS_CACHE);
gen_addr_reg_index(EA, ctx);
val = tcg_temp_new();
/* XXX: specification says this should be treated as a store by the MMU */
@@ -4043,6 +4102,7 @@ GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE)
{
    /* XXX: specification says this is treated as a load by the MMU */
TCGv t0 = tcg_temp_new();
+ gen_set_access_type(ACCESS_CACHE);
gen_addr_reg_index(t0, ctx);
gen_qemu_ld8u(t0, t0, ctx->mem_idx);
tcg_temp_free(t0);
@@ -4067,117 +4127,38 @@ GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x02000001, PPC_CACHE)
}
/* dcbz */
-#define op_dcbz(n) (*gen_op_dcbz[n][ctx->mem_idx])()
-static GenOpFunc *gen_op_dcbz[4][NB_MEM_FUNCS] = {
- /* 32 bytes cache line size */
- {
-#define gen_op_dcbz_l32_le_raw gen_op_dcbz_l32_raw
-#define gen_op_dcbz_l32_le_user gen_op_dcbz_l32_user
-#define gen_op_dcbz_l32_le_kernel gen_op_dcbz_l32_kernel
-#define gen_op_dcbz_l32_le_hypv gen_op_dcbz_l32_hypv
-#define gen_op_dcbz_l32_le_64_raw gen_op_dcbz_l32_64_raw
-#define gen_op_dcbz_l32_le_64_user gen_op_dcbz_l32_64_user
-#define gen_op_dcbz_l32_le_64_kernel gen_op_dcbz_l32_64_kernel
-#define gen_op_dcbz_l32_le_64_hypv gen_op_dcbz_l32_64_hypv
- GEN_MEM_FUNCS(dcbz_l32),
- },
- /* 64 bytes cache line size */
- {
-#define gen_op_dcbz_l64_le_raw gen_op_dcbz_l64_raw
-#define gen_op_dcbz_l64_le_user gen_op_dcbz_l64_user
-#define gen_op_dcbz_l64_le_kernel gen_op_dcbz_l64_kernel
-#define gen_op_dcbz_l64_le_hypv gen_op_dcbz_l64_hypv
-#define gen_op_dcbz_l64_le_64_raw gen_op_dcbz_l64_64_raw
-#define gen_op_dcbz_l64_le_64_user gen_op_dcbz_l64_64_user
-#define gen_op_dcbz_l64_le_64_kernel gen_op_dcbz_l64_64_kernel
-#define gen_op_dcbz_l64_le_64_hypv gen_op_dcbz_l64_64_hypv
- GEN_MEM_FUNCS(dcbz_l64),
- },
- /* 128 bytes cache line size */
- {
-#define gen_op_dcbz_l128_le_raw gen_op_dcbz_l128_raw
-#define gen_op_dcbz_l128_le_user gen_op_dcbz_l128_user
-#define gen_op_dcbz_l128_le_kernel gen_op_dcbz_l128_kernel
-#define gen_op_dcbz_l128_le_hypv gen_op_dcbz_l128_hypv
-#define gen_op_dcbz_l128_le_64_raw gen_op_dcbz_l128_64_raw
-#define gen_op_dcbz_l128_le_64_user gen_op_dcbz_l128_64_user
-#define gen_op_dcbz_l128_le_64_kernel gen_op_dcbz_l128_64_kernel
-#define gen_op_dcbz_l128_le_64_hypv gen_op_dcbz_l128_64_hypv
- GEN_MEM_FUNCS(dcbz_l128),
- },
- /* tunable cache line size */
- {
-#define gen_op_dcbz_le_raw gen_op_dcbz_raw
-#define gen_op_dcbz_le_user gen_op_dcbz_user
-#define gen_op_dcbz_le_kernel gen_op_dcbz_kernel
-#define gen_op_dcbz_le_hypv gen_op_dcbz_hypv
-#define gen_op_dcbz_le_64_raw gen_op_dcbz_64_raw
-#define gen_op_dcbz_le_64_user gen_op_dcbz_64_user
-#define gen_op_dcbz_le_64_kernel gen_op_dcbz_64_kernel
-#define gen_op_dcbz_le_64_hypv gen_op_dcbz_64_hypv
- GEN_MEM_FUNCS(dcbz),
- },
-};
-
-static always_inline void handler_dcbz (DisasContext *ctx,
- int dcache_line_size)
-{
- int n;
-
- switch (dcache_line_size) {
- case 32:
- n = 0;
- break;
- case 64:
- n = 1;
- break;
- case 128:
- n = 2;
- break;
- default:
- n = 3;
- break;
- }
- op_dcbz(n);
-}
-
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03E00001, PPC_CACHE_DCBZ)
{
- gen_addr_reg_index(cpu_T[0], ctx);
- handler_dcbz(ctx, ctx->dcache_line_size);
- gen_op_check_reservation();
+ TCGv t0 = tcg_temp_new();
+ gen_addr_reg_index(t0, ctx);
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_dcbz(t0);
+ tcg_temp_free(t0);
}
GEN_HANDLER2(dcbz_970, "dcbz", 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZT)
{
- gen_addr_reg_index(cpu_T[0], ctx);
+ TCGv t0 = tcg_temp_new();
+ gen_addr_reg_index(t0, ctx);
+    /* NIP cannot be restored if the memory exception comes from a helper */
+ gen_update_nip(ctx, ctx->nip - 4);
if (ctx->opcode & 0x00200000)
- handler_dcbz(ctx, ctx->dcache_line_size);
+ gen_helper_dcbz(t0);
else
- handler_dcbz(ctx, -1);
- gen_op_check_reservation();
+ gen_helper_dcbz_970(t0);
+ tcg_temp_free(t0);
}
/* icbi */
-#define op_icbi() (*gen_op_icbi[ctx->mem_idx])()
-#define gen_op_icbi_le_raw gen_op_icbi_raw
-#define gen_op_icbi_le_user gen_op_icbi_user
-#define gen_op_icbi_le_kernel gen_op_icbi_kernel
-#define gen_op_icbi_le_hypv gen_op_icbi_hypv
-#define gen_op_icbi_le_64_raw gen_op_icbi_64_raw
-#define gen_op_icbi_le_64_user gen_op_icbi_64_user
-#define gen_op_icbi_le_64_kernel gen_op_icbi_64_kernel
-#define gen_op_icbi_le_64_hypv gen_op_icbi_64_hypv
-static GenOpFunc *gen_op_icbi[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(icbi),
-};
-
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI)
{
+ TCGv t0 = tcg_temp_new();
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_icbi();
+ gen_addr_reg_index(t0, ctx);
+ gen_helper_icbi(t0);
+ tcg_temp_free(t0);
}
/* Optional: */
@@ -4415,31 +4396,28 @@ GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI)
/*** External control ***/
/* Optional: */
-#define op_eciwx() (*gen_op_eciwx[ctx->mem_idx])()
-#define op_ecowx() (*gen_op_ecowx[ctx->mem_idx])()
-static GenOpFunc *gen_op_eciwx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(eciwx),
-};
-static GenOpFunc *gen_op_ecowx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(ecowx),
-};
-
/* eciwx */
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN)
{
- /* Should check EAR[E] & alignment ! */
- gen_addr_reg_index(cpu_T[0], ctx);
- op_eciwx();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ /* Should check EAR[E] ! */
+ TCGv t0 = tcg_temp_new();
+ gen_set_access_type(ACCESS_RES);
+ gen_addr_reg_index(t0, ctx);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ tcg_temp_free(t0);
}
/* ecowx */
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN)
{
- /* Should check EAR[E] & alignment ! */
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]);
- op_ecowx();
+ /* Should check EAR[E] ! */
+ TCGv t0 = tcg_temp_new();
+ gen_set_access_type(ACCESS_RES);
+ gen_addr_reg_index(t0, ctx);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_st32(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ tcg_temp_free(t0);
}
/* PowerPC 601 specific instructions */
@@ -4547,47 +4525,26 @@ GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR)
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
}
-/* As lscbx load from memory byte after byte, it's always endian safe.
- * Original POWER is 32 bits only, define 64 bits ops as 32 bits ones
- */
-#define op_POWER_lscbx(start, ra, rb) \
-(*gen_op_POWER_lscbx[ctx->mem_idx])(start, ra, rb)
-#define gen_op_POWER_lscbx_64_raw gen_op_POWER_lscbx_raw
-#define gen_op_POWER_lscbx_64_user gen_op_POWER_lscbx_user
-#define gen_op_POWER_lscbx_64_kernel gen_op_POWER_lscbx_kernel
-#define gen_op_POWER_lscbx_64_hypv gen_op_POWER_lscbx_hypv
-#define gen_op_POWER_lscbx_le_raw gen_op_POWER_lscbx_raw
-#define gen_op_POWER_lscbx_le_user gen_op_POWER_lscbx_user
-#define gen_op_POWER_lscbx_le_kernel gen_op_POWER_lscbx_kernel
-#define gen_op_POWER_lscbx_le_hypv gen_op_POWER_lscbx_hypv
-#define gen_op_POWER_lscbx_le_64_raw gen_op_POWER_lscbx_raw
-#define gen_op_POWER_lscbx_le_64_user gen_op_POWER_lscbx_user
-#define gen_op_POWER_lscbx_le_64_kernel gen_op_POWER_lscbx_kernel
-#define gen_op_POWER_lscbx_le_64_hypv gen_op_POWER_lscbx_hypv
-static GenOpFunc3 *gen_op_POWER_lscbx[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(POWER_lscbx),
-};
-
/* lscbx - lscbx. */
GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR)
{
- int ra = rA(ctx->opcode);
- int rb = rB(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
+ TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
- gen_addr_reg_index(cpu_T[0], ctx);
- if (ra == 0) {
- ra = rb;
- }
+ gen_addr_reg_index(t0, ctx);
    /* NIP cannot be restored if the memory exception comes from a helper */
gen_update_nip(ctx, ctx->nip - 4);
- tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
- tcg_gen_shri_tl(cpu_T[2], cpu_xer, XER_CMP);
- tcg_gen_andi_tl(cpu_T[2], cpu_T[2], 0xFF);
- op_POWER_lscbx(rD(ctx->opcode), ra, rb);
+ gen_helper_lscbx(t0, t0, t1, t2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
- tcg_gen_or_tl(cpu_xer, cpu_xer, cpu_T[0]);
+ tcg_gen_or_tl(cpu_xer, cpu_xer, t0);
if (unlikely(Rc(ctx->opcode) != 0))
- gen_set_Rc0(ctx, cpu_T[0]);
+ gen_set_Rc0(ctx, t0);
+ tcg_temp_free(t0);
}
/* maskg - maskg. */
@@ -4874,9 +4831,7 @@ GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]);
- gen_op_602_mfrom();
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]);
+ gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
#endif
}
@@ -4891,8 +4846,7 @@ GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rB(ctx->opcode)]);
- gen_op_6xx_tlbld();
+ gen_helper_load_6xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
#endif
}
@@ -4906,8 +4860,7 @@ GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rB(ctx->opcode)]);
- gen_op_6xx_tlbli();
+ gen_helper_load_6xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
#endif
}
@@ -4922,8 +4875,7 @@ GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rB(ctx->opcode)]);
- gen_op_74xx_tlbld();
+ gen_helper_load_74xx_tlbd(cpu_gpr[rB(ctx->opcode)]);
#endif
}
@@ -4937,8 +4889,7 @@ GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rB(ctx->opcode)]);
- gen_op_74xx_tlbli();
+ gen_helper_load_74xx_tlbi(cpu_gpr[rB(ctx->opcode)]);
#endif
}
@@ -5013,7 +4964,7 @@ GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER)
GEN_EXCP_PRIVOPC(ctx);
return;
}
- gen_op_POWER_rfsvc();
+ gen_helper_rfsvc();
GEN_SYNC(ctx);
#endif
}
@@ -5022,134 +4973,121 @@ GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER)
/* POWER2 specific instructions */
/* Quad manipulation (load/store two floats at a time) */
-/* Original POWER2 is 32 bits only, define 64 bits ops as 32 bits ones */
-#define op_POWER2_lfq() (*gen_op_POWER2_lfq[ctx->mem_idx])()
-#define op_POWER2_stfq() (*gen_op_POWER2_stfq[ctx->mem_idx])()
-#define gen_op_POWER2_lfq_64_raw gen_op_POWER2_lfq_raw
-#define gen_op_POWER2_lfq_64_user gen_op_POWER2_lfq_user
-#define gen_op_POWER2_lfq_64_kernel gen_op_POWER2_lfq_kernel
-#define gen_op_POWER2_lfq_64_hypv gen_op_POWER2_lfq_hypv
-#define gen_op_POWER2_lfq_le_64_raw gen_op_POWER2_lfq_le_raw
-#define gen_op_POWER2_lfq_le_64_user gen_op_POWER2_lfq_le_user
-#define gen_op_POWER2_lfq_le_64_kernel gen_op_POWER2_lfq_le_kernel
-#define gen_op_POWER2_lfq_le_64_hypv gen_op_POWER2_lfq_le_hypv
-#define gen_op_POWER2_stfq_64_raw gen_op_POWER2_stfq_raw
-#define gen_op_POWER2_stfq_64_user gen_op_POWER2_stfq_user
-#define gen_op_POWER2_stfq_64_kernel gen_op_POWER2_stfq_kernel
-#define gen_op_POWER2_stfq_64_hypv gen_op_POWER2_stfq_hypv
-#define gen_op_POWER2_stfq_le_64_raw gen_op_POWER2_stfq_le_raw
-#define gen_op_POWER2_stfq_le_64_user gen_op_POWER2_stfq_le_user
-#define gen_op_POWER2_stfq_le_64_kernel gen_op_POWER2_stfq_le_kernel
-#define gen_op_POWER2_stfq_le_64_hypv gen_op_POWER2_stfq_le_hypv
-static GenOpFunc *gen_op_POWER2_lfq[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(POWER2_lfq),
-};
-static GenOpFunc *gen_op_POWER2_stfq[NB_MEM_FUNCS] = {
- GEN_MEM_FUNCS(POWER2_stfq),
-};
/* lfq */
GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(cpu_T[0], ctx, 0);
- op_POWER2_lfq();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode) + 1], cpu_FT[1]);
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ gen_addr_imm_index(t0, ctx, 0);
+ gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
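+    /* The second doubleword goes into FPR (rd + 1), wrapping from 31 back to 0 */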
+ tcg_gen_addi_tl(t0, t0, 8);
+ gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ tcg_temp_free(t0);
}
/* lfqu */
GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
int ra = rA(ctx->opcode);
-
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(cpu_T[0], ctx, 0);
- op_POWER2_lfq();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode) + 1], cpu_FT[1]);
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_addr_imm_index(t0, ctx, 0);
+ gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t1, t0, 8);
+ gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], cpu_T[0]);
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* lfqux */
GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2)
{
int ra = rA(ctx->opcode);
-
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_POWER2_lfq();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode) + 1], cpu_FT[1]);
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_addr_reg_index(t0, ctx);
+ gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t1, t0, 8);
+ gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], cpu_T[0]);
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* lfqx */
GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- op_POWER2_lfq();
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_FT[0]);
- tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode) + 1], cpu_FT[1]);
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ gen_addr_reg_index(t0, ctx);
+ gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t0, t0, 8);
+ gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ tcg_temp_free(t0);
}
/* stfq */
GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(cpu_T[0], ctx, 0);
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rS(ctx->opcode) + 1]);
- op_POWER2_stfq();
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ gen_addr_imm_index(t0, ctx, 0);
+ gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t0, t0, 8);
+ gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ tcg_temp_free(t0);
}
/* stfqu */
GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
int ra = rA(ctx->opcode);
-
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(cpu_T[0], ctx, 0);
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rS(ctx->opcode) + 1]);
- op_POWER2_stfq();
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_addr_imm_index(t0, ctx, 0);
+ gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t1, t0, 8);
+ gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], cpu_T[0]);
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* stfqux */
GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2)
{
int ra = rA(ctx->opcode);
-
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rS(ctx->opcode) + 1]);
- op_POWER2_stfq();
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_addr_reg_index(t0, ctx);
+ gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t1, t0, 8);
+ gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
if (ra != 0)
- tcg_gen_mov_tl(cpu_gpr[ra], cpu_T[0]);
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* stfqx */
GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2)
{
- /* NIP cannot be restored if the memory exception comes from an helper */
- gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(cpu_T[0], ctx);
- tcg_gen_mov_i64(cpu_FT[0], cpu_fpr[rS(ctx->opcode)]);
- tcg_gen_mov_i64(cpu_FT[1], cpu_fpr[rS(ctx->opcode) + 1]);
- op_POWER2_stfq();
+ int rd = rD(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ gen_addr_reg_index(t0, ctx);
+ gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
+ tcg_gen_addi_tl(t0, t0, 8);
+ gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ tcg_temp_free(t0);
}
/* BookE specific instructions */
@@ -5512,6 +5450,7 @@ GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON)
return;
}
EA = tcg_temp_new();
+ gen_set_access_type(ACCESS_CACHE);
gen_addr_reg_index(EA, ctx);
val = tcg_temp_new();
gen_qemu_ld32u(val, EA, ctx->mem_idx);
@@ -5569,7 +5508,7 @@ GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP)
return;
}
/* Restore CPU state */
- gen_op_40x_rfci();
+ gen_helper_40x_rfci();
GEN_SYNC(ctx);
#endif
}
@@ -5584,7 +5523,7 @@ GEN_HANDLER(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE)
return;
}
/* Restore CPU state */
- gen_op_rfci();
+ gen_helper_rfci();
GEN_SYNC(ctx);
#endif
}
@@ -5601,7 +5540,7 @@ GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI)
return;
}
/* Restore CPU state */
- gen_op_rfdi();
+ gen_helper_rfdi();
GEN_SYNC(ctx);
#endif
}
@@ -5617,7 +5556,7 @@ GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI)
return;
}
/* Restore CPU state */
- gen_op_rfmci();
+ gen_helper_rfmci();
GEN_SYNC(ctx);
#endif
}
@@ -5845,61 +5784,59 @@ GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE)
/*** Altivec vector extension ***/
/* Altivec registers moves */
-static always_inline void gen_load_avr(int t, int reg) {
- tcg_gen_mov_i64(cpu_AVRh[t], cpu_avrh[reg]);
- tcg_gen_mov_i64(cpu_AVRl[t], cpu_avrl[reg]);
-}
-
-static always_inline void gen_store_avr(int reg, int t) {
- tcg_gen_mov_i64(cpu_avrh[reg], cpu_AVRh[t]);
- tcg_gen_mov_i64(cpu_avrl[reg], cpu_AVRl[t]);
-}
-
-#define op_vr_ldst(name) (*gen_op_##name[ctx->mem_idx])()
-#define OP_VR_LD_TABLE(name) \
-static GenOpFunc *gen_op_vr_l##name[NB_MEM_FUNCS] = { \
- GEN_MEM_FUNCS(vr_l##name), \
-};
-#define OP_VR_ST_TABLE(name) \
-static GenOpFunc *gen_op_vr_st##name[NB_MEM_FUNCS] = { \
- GEN_MEM_FUNCS(vr_st##name), \
-};
-
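+/* Vector loads and stores force 16-byte alignment by masking the low address bits.
+ * The low bit of mem_idx flags little-endian mode, where the two 64-bit halves are accessed in swapped order. */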
#define GEN_VR_LDX(name, opc2, opc3) \
-GEN_HANDLER(l##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->altivec_enabled)) { \
GEN_EXCP_NO_VR(ctx); \
return; \
} \
- gen_addr_reg_index(cpu_T[0], ctx); \
- op_vr_ldst(vr_l##name); \
- gen_store_avr(rD(ctx->opcode), 0); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(EA, ctx); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ if (ctx->mem_idx & 1) { \
+ gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ } else { \
+ gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ } \
+ tcg_temp_free(EA); \
}
#define GEN_VR_STX(name, opc2, opc3) \
GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) \
{ \
+ TCGv EA; \
if (unlikely(!ctx->altivec_enabled)) { \
GEN_EXCP_NO_VR(ctx); \
return; \
} \
- gen_addr_reg_index(cpu_T[0], ctx); \
- gen_load_avr(0, rS(ctx->opcode)); \
- op_vr_ldst(vr_st##name); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(EA, ctx); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ if (ctx->mem_idx & 1) { \
+ gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ } else { \
+ gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ } \
+ tcg_temp_free(EA); \
}
-OP_VR_LD_TABLE(vx);
-GEN_VR_LDX(vx, 0x07, 0x03);
+GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
-#define gen_op_vr_lvxl gen_op_vr_lvx
-GEN_VR_LDX(vxl, 0x07, 0x0B);
+GEN_VR_LDX(lvxl, 0x07, 0x0B);
-OP_VR_ST_TABLE(vx);
-GEN_VR_STX(vx, 0x07, 0x07);
+GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
-#define gen_op_vr_stvxl gen_op_vr_stvx
-GEN_VR_STX(vxl, 0x07, 0x0F);
+GEN_VR_STX(svxl, 0x07, 0x0F);
/*** SPE extension ***/
/* Register moves */
@@ -5939,91 +5876,6 @@ static always_inline void gen_speundef (DisasContext *ctx)
GEN_EXCP_INVAL(ctx);
}
-/* SPE load and stores */
-static always_inline void gen_addr_spe_imm_index (TCGv EA, DisasContext *ctx, int sh)
-{
- target_long simm = rB(ctx->opcode);
-
- if (rA(ctx->opcode) == 0)
- tcg_gen_movi_tl(EA, simm << sh);
- else if (likely(simm != 0))
- tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm << sh);
- else
- tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
-}
-
-#define op_spe_ldst(name) (*gen_op_##name[ctx->mem_idx])()
-#define OP_SPE_LD_TABLE(name) \
-static GenOpFunc *gen_op_spe_l##name[NB_MEM_FUNCS] = { \
- GEN_MEM_FUNCS(spe_l##name), \
-};
-#define OP_SPE_ST_TABLE(name) \
-static GenOpFunc *gen_op_spe_st##name[NB_MEM_FUNCS] = { \
- GEN_MEM_FUNCS(spe_st##name), \
-};
-
-#define GEN_SPE_LD(name, sh) \
-static always_inline void gen_evl##name (DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
- return; \
- } \
- gen_addr_spe_imm_index(cpu_T[0], ctx, sh); \
- op_spe_ldst(spe_l##name); \
- gen_store_gpr64(rD(ctx->opcode), cpu_T64[1]); \
-}
-
-#define GEN_SPE_LDX(name) \
-static always_inline void gen_evl##name##x (DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
- return; \
- } \
- gen_addr_reg_index(cpu_T[0], ctx); \
- op_spe_ldst(spe_l##name); \
- gen_store_gpr64(rD(ctx->opcode), cpu_T64[1]); \
-}
-
-#define GEN_SPEOP_LD(name, sh) \
-OP_SPE_LD_TABLE(name); \
-GEN_SPE_LD(name, sh); \
-GEN_SPE_LDX(name)
-
-#define GEN_SPE_ST(name, sh) \
-static always_inline void gen_evst##name (DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
- return; \
- } \
- gen_addr_spe_imm_index(cpu_T[0], ctx, sh); \
- gen_load_gpr64(cpu_T64[1], rS(ctx->opcode)); \
- op_spe_ldst(spe_st##name); \
-}
-
-#define GEN_SPE_STX(name) \
-static always_inline void gen_evst##name##x (DisasContext *ctx) \
-{ \
- if (unlikely(!ctx->spe_enabled)) { \
- GEN_EXCP_NO_AP(ctx); \
- return; \
- } \
- gen_addr_reg_index(cpu_T[0], ctx); \
- gen_load_gpr64(cpu_T64[1], rS(ctx->opcode)); \
- op_spe_ldst(spe_st##name); \
-}
-
-#define GEN_SPEOP_ST(name, sh) \
-OP_SPE_ST_TABLE(name); \
-GEN_SPE_ST(name, sh); \
-GEN_SPE_STX(name)
-
-#define GEN_SPEOP_LDST(name, sh) \
-GEN_SPEOP_LD(name, sh); \
-GEN_SPEOP_ST(name, sh)
-
/* SPE logic */
#if defined(TARGET_PPC64)
#define GEN_SPEOP_LOGIC2(name, tcg_op) \
@@ -6477,16 +6329,10 @@ static always_inline void gen_evmergelohi (DisasContext *ctx)
}
static always_inline void gen_evsplati (DisasContext *ctx)
{
- int32_t imm = (int32_t)(rA(ctx->opcode) << 11) >> 27;
+ uint64_t imm = ((int32_t)(rA(ctx->opcode) << 11)) >> 27;
#if defined(TARGET_PPC64)
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_movi_tl(t0, imm);
- tcg_gen_shri_tl(t1, t0, 32);
- tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], (imm << 32) | imm);
#else
tcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], imm);
tcg_gen_movi_i32(cpu_gprh[rD(ctx->opcode)], imm);
@@ -6494,16 +6340,10 @@ static always_inline void gen_evsplati (DisasContext *ctx)
}
static always_inline void gen_evsplatfi (DisasContext *ctx)
{
- uint32_t imm = rA(ctx->opcode) << 11;
+ uint64_t imm = rA(ctx->opcode) << 11;
#if defined(TARGET_PPC64)
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_movi_tl(t0, imm);
- tcg_gen_shri_tl(t1, t0, 32);
- tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], (imm << 32) | imm);
#else
tcg_gen_movi_i32(cpu_gpr[rD(ctx->opcode)], imm);
tcg_gen_movi_i32(cpu_gprh[rD(ctx->opcode)], imm);
@@ -6601,188 +6441,352 @@ GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, PPC_SPE); ////
GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, PPC_SPE); ////
GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, PPC_SPE); ////
-/* Load and stores */
-GEN_SPEOP_LDST(dd, 3);
-GEN_SPEOP_LDST(dw, 3);
-GEN_SPEOP_LDST(dh, 3);
-GEN_SPEOP_LDST(whe, 2);
-GEN_SPEOP_LD(whou, 2);
-GEN_SPEOP_LD(whos, 2);
-GEN_SPEOP_ST(who, 2);
-
-#define _GEN_OP_SPE_STWWE(suffix) \
-static always_inline void gen_op_spe_stwwe_##suffix (void) \
-{ \
- gen_op_srli32_T1_64(); \
- gen_op_spe_stwwo_##suffix(); \
+/* SPE load and stores */
+static always_inline void gen_addr_spe_imm_index (TCGv EA, DisasContext *ctx, int sh)
+{
+ target_ulong uimm = rB(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0)
+ tcg_gen_movi_tl(EA, uimm << sh);
+ else
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
}
-#define _GEN_OP_SPE_STWWE_LE(suffix) \
-static always_inline void gen_op_spe_stwwe_le_##suffix (void) \
-{ \
- gen_op_srli32_T1_64(); \
- gen_op_spe_stwwo_le_##suffix(); \
+
+static always_inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#else
+ TCGv_i64 t0 = tcg_temp_new_i64();
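+    /* Without 64-bit GPRs, split the loaded doubleword: low word into gpr, high word into gprh */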
+ gen_qemu_ld64(t0, addr, ctx->mem_idx);
+ tcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_i32(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
+#endif
}
+
+static always_inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
+{
#if defined(TARGET_PPC64)
-#define GEN_OP_SPE_STWWE(suffix) \
-_GEN_OP_SPE_STWWE(suffix); \
-_GEN_OP_SPE_STWWE_LE(suffix); \
-static always_inline void gen_op_spe_stwwe_64_##suffix (void) \
-{ \
- gen_op_srli32_T1_64(); \
- gen_op_spe_stwwo_64_##suffix(); \
-} \
-static always_inline void gen_op_spe_stwwe_le_64_##suffix (void) \
-{ \
- gen_op_srli32_T1_64(); \
- gen_op_spe_stwwo_le_64_##suffix(); \
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_addi_tl(addr, addr, 4);
+ gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_ld32u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 4);
+ gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#endif
}
+
+static always_inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
-#define GEN_OP_SPE_STWWE(suffix) \
-_GEN_OP_SPE_STWWE(suffix); \
-_GEN_OP_SPE_STWWE_LE(suffix)
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#endif
-#if defined(CONFIG_USER_ONLY)
-GEN_OP_SPE_STWWE(raw);
-#else /* defined(CONFIG_USER_ONLY) */
-GEN_OP_SPE_STWWE(user);
-GEN_OP_SPE_STWWE(kernel);
-GEN_OP_SPE_STWWE(hypv);
-#endif /* defined(CONFIG_USER_ONLY) */
-GEN_SPEOP_ST(wwe, 2);
-GEN_SPEOP_ST(wwo, 2);
-
-#define GEN_SPE_LDSPLAT(name, op, suffix) \
-static always_inline void gen_op_spe_l##name##_##suffix (void) \
-{ \
- gen_op_##op##_##suffix(); \
- gen_op_splatw_T1_64(); \
+ tcg_temp_free(t0);
}
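For reference, a plain-C model of the value gen_op_evldh assembles above (drawn from the shift amounts in the TCG code; h[0] is the halfword at the lowest address, and on 32-bit targets the upper 32 bits live in the gprh half of the pair):

    /* Illustrative only: packing of the four loaded halfwords into the
       64-bit SPE register. */
    static uint64_t evldh_pack(const uint16_t h[4])
    {
        return ((uint64_t)h[0] << 48) | ((uint64_t)h[1] << 32)
             | ((uint64_t)h[2] << 16) |  (uint64_t)h[3];
    }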
-#define GEN_OP_SPE_LHE(suffix) \
-static always_inline void gen_op_spe_lhe_##suffix (void) \
-{ \
- gen_op_spe_lh_##suffix(); \
- gen_op_sli16_T1_64(); \
+static always_inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
}
-#define GEN_OP_SPE_LHX(suffix) \
-static always_inline void gen_op_spe_lhx_##suffix (void) \
-{ \
- gen_op_spe_lh_##suffix(); \
- gen_op_extsh_T1_64(); \
+static always_inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
}
-#if defined(CONFIG_USER_ONLY)
-GEN_OP_SPE_LHE(raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, raw);
-GEN_OP_SPE_LHE(le_raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_raw);
-GEN_OP_SPE_LHX(raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, raw);
-GEN_OP_SPE_LHX(le_raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_raw);
+static always_inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(t0, addr, ctx->mem_idx);
#if defined(TARGET_PPC64)
-GEN_OP_SPE_LHE(64_raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_raw);
-GEN_OP_SPE_LHE(le_64_raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_raw);
-GEN_OP_SPE_LHX(64_raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_raw);
-GEN_OP_SPE_LHX(le_64_raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_raw);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
#endif
+ tcg_temp_free(t0);
+}
+
+static always_inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
-GEN_OP_SPE_LHE(user);
-GEN_OP_SPE_LHE(kernel);
-GEN_OP_SPE_LHE(hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, hypv);
-GEN_OP_SPE_LHE(le_user);
-GEN_OP_SPE_LHE(le_kernel);
-GEN_OP_SPE_LHE(le_hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_hypv);
-GEN_OP_SPE_LHX(user);
-GEN_OP_SPE_LHX(kernel);
-GEN_OP_SPE_LHX(hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, hypv);
-GEN_OP_SPE_LHX(le_user);
-GEN_OP_SPE_LHX(le_kernel);
-GEN_OP_SPE_LHX(le_hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_hypv);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+#endif
+ tcg_temp_free(t0);
+}
+
+static always_inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_ld16u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+}
+
+static always_inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
+{
#if defined(TARGET_PPC64)
-GEN_OP_SPE_LHE(64_user);
-GEN_OP_SPE_LHE(64_kernel);
-GEN_OP_SPE_LHE(64_hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_hypv);
-GEN_OP_SPE_LHE(le_64_user);
-GEN_OP_SPE_LHE(le_64_kernel);
-GEN_OP_SPE_LHE(le_64_hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_hypv);
-GEN_OP_SPE_LHX(64_user);
-GEN_OP_SPE_LHX(64_kernel);
-GEN_OP_SPE_LHX(64_hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_hypv);
-GEN_OP_SPE_LHX(le_64_user);
-GEN_OP_SPE_LHX(le_64_kernel);
-GEN_OP_SPE_LHX(le_64_hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_hypv);
-#endif
-#endif
-GEN_SPEOP_LD(hhesplat, 1);
-GEN_SPEOP_LD(hhousplat, 1);
-GEN_SPEOP_LD(hhossplat, 1);
-GEN_SPEOP_LD(wwsplat, 2);
-GEN_SPEOP_LD(whsplat, 2);
-
-GEN_SPE(evlddx, evldd, 0x00, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evldwx, evldw, 0x01, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evldhx, evldh, 0x02, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlhhesplatx, evlhhesplat, 0x04, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlhhousplatx, evlhhousplat, 0x06, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlhhossplatx, evlhhossplat, 0x07, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhex, evlwhe, 0x08, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhoux, evlwhou, 0x0A, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhosx, evlwhos, 0x0B, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwwsplatx, evlwwsplat, 0x0C, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhsplatx, evlwhsplat, 0x0E, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstddx, evstdd, 0x10, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstdwx, evstdw, 0x11, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstdhx, evstdh, 0x12, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwhex, evstwhe, 0x18, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwhox, evstwho, 0x1A, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwwex, evstwwe, 0x1C, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwwox, evstwwo, 0x1E, 0x0C, 0x00000000, PPC_SPE); //
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ tcg_gen_ext32u_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_ld16s(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16s(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+}
+
+static always_inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
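The splat built above can be summarised by a small reference model (illustrative, not part of the patch): the loaded word is replicated into both 32-bit halves of the SPE register.

    static uint64_t evlwwsplat_pack(uint32_t w)
    {
        return ((uint64_t)w << 32) | w;   /* same word in the high and low half */
    }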
+
+static always_inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+ tcg_temp_free(t0);
+}
+
+static always_inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ gen_qemu_st64(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+#else
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gprh[rS(ctx->opcode)]);
+ gen_qemu_st64(t0, addr, ctx->mem_idx);
+ tcg_temp_free_i64(t0);
+#endif
+}
+
+static always_inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st32(t0, addr, ctx->mem_idx);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+ tcg_gen_addi_tl(addr, addr, 4);
+ gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+static always_inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 48);
+#else
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+#endif
+ gen_qemu_st16(t0, addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 2);
+#if defined(TARGET_PPC64)
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st16(t0, addr, ctx->mem_idx);
+#else
+ gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+ tcg_gen_addi_tl(addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(t0, addr, ctx->mem_idx);
+ tcg_temp_free(t0);
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
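The store order used by gen_op_evstdh above, as a plain-C reference (illustrative; the most significant halfword is stored first):

    static void evstdh_unpack(uint64_t r, uint16_t h[4])
    {
        h[0] = r >> 48;   /* stored at EA     */
        h[1] = r >> 32;   /* stored at EA + 2 */
        h[2] = r >> 16;   /* stored at EA + 4 */
        h[3] = r;         /* stored at EA + 6 */
    }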
+
+static always_inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 48);
+#else
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+#endif
+ gen_qemu_st16(t0, addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(t0, addr, ctx->mem_idx);
+ tcg_temp_free(t0);
+}
+
+static always_inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st16(t0, addr, ctx->mem_idx);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+ tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+static always_inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+ gen_qemu_st32(t0, addr, ctx->mem_idx);
+ tcg_temp_free(t0);
+#else
+ gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+}
+
+static always_inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+GEN_HANDLER(gen_##name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE) \
+{ \
+ TCGv t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ GEN_EXCP_NO_AP(ctx); \
+ return; \
+ } \
+ t0 = tcg_temp_new(); \
+ if (Rc(ctx->opcode)) { \
+ gen_addr_spe_imm_index(t0, ctx, sh); \
+ } else { \
+ gen_addr_reg_index(t0, ctx); \
+ } \
+ gen_op_##name(ctx, t0); \
+ tcg_temp_free(t0); \
+}
+
+GEN_SPEOP_LDST(evldd, 0x00, 3);
+GEN_SPEOP_LDST(evldw, 0x01, 3);
+GEN_SPEOP_LDST(evldh, 0x02, 3);
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
+GEN_SPEOP_LDST(evlwhe, 0x08, 2);
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3);
+GEN_SPEOP_LDST(evstdw, 0x11, 3);
+GEN_SPEOP_LDST(evstdh, 0x12, 3);
+GEN_SPEOP_LDST(evstwhe, 0x18, 2);
+GEN_SPEOP_LDST(evstwho, 0x1A, 2);
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
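For instance, the first instantiation above, GEN_SPEOP_LDST(evldd, 0x00, 3), expands to roughly the handler below (GEN_HANDLER additionally registers the opcode in the dispatch table); the low opcode bit, read back through Rc(), selects the immediate-offset form (evldd) over the register-indexed form (evlddx):

    GEN_HANDLER(gen_evldd, 0x04, 0x00, 0x0C, 0x00000000, PPC_SPE)
    {
        TCGv t0;
        if (unlikely(!ctx->spe_enabled)) {
            GEN_EXCP_NO_AP(ctx);
            return;
        }
        t0 = tcg_temp_new();
        if (Rc(ctx->opcode)) {
            gen_addr_spe_imm_index(t0, ctx, 3);   /* evldd:  EA = rA + (UIMM << 3) */
        } else {
            gen_addr_reg_index(t0, ctx);          /* evlddx: EA = rA + rB          */
        }
        gen_op_evldd(ctx, t0);
        tcg_temp_free(t0);
    }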
/* Multiply and add - TODO */
#if 0
@@ -7440,7 +7444,6 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
#else
ctx.mem_idx = (supervisor << 1) | little_endian;
#endif
- ctx.dcache_line_size = env->dcache_line_size;
ctx.fpu_enabled = msr_fp;
if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
ctx.spe_enabled = msr_spe;
@@ -7470,8 +7473,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
gen_icount_start();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == ctx.nip) {
gen_update_nip(&ctx, ctx.nip);
gen_helper_raise_debug();
@@ -7630,41 +7633,5 @@ void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
void gen_pc_load(CPUState *env, TranslationBlock *tb,
unsigned long searched_pc, int pc_pos, void *puc)
{
- int type, c;
- /* for PPC, we need to look at the micro operation to get the
- * access type */
env->nip = gen_opc_pc[pc_pos];
- c = gen_opc_buf[pc_pos];
- switch(c) {
-#if defined(CONFIG_USER_ONLY)
-#define CASE3(op)\
- case INDEX_op_ ## op ## _raw
-#else
-#define CASE3(op)\
- case INDEX_op_ ## op ## _user:\
- case INDEX_op_ ## op ## _kernel:\
- case INDEX_op_ ## op ## _hypv
-#endif
-
- CASE3(stfd):
- CASE3(stfs):
- CASE3(lfd):
- CASE3(lfs):
- type = ACCESS_FLOAT;
- break;
- CASE3(lwarx):
- type = ACCESS_RES;
- break;
- CASE3(stwcx):
- type = ACCESS_RES;
- break;
- CASE3(eciwx):
- CASE3(ecowx):
- type = ACCESS_EXT;
- break;
- default:
- type = ACCESS_INT;
- break;
- }
- env->access_type = type;
}
diff --git a/target-sh4/exec.h b/target-sh4/exec.h
index f2e6feada..612261084 100644
--- a/target-sh4/exec.h
+++ b/target-sh4/exec.h
@@ -65,6 +65,4 @@ int find_utlb_entry(CPUState * env, target_ulong address, int use_asid);
void do_interrupt(CPUState * env);
-void cpu_loop_exit(void);
-
#endif /* _EXEC_SH4_H */
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index bbfd74573..505b19690 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1776,8 +1776,8 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
diff --git a/target-sparc/exec.h b/target-sparc/exec.h
index 75410aa79..36fd1176f 100644
--- a/target-sparc/exec.h
+++ b/target-sparc/exec.h
@@ -24,9 +24,6 @@ static inline void regs_to_env(void)
/* op_helper.c */
void do_interrupt(CPUState *env);
-/* cpu-exec.c */
-void cpu_loop_exit(void);
-
static inline int cpu_halted(CPUState *env1) {
if (!env1->halted)
return 0;
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index e94e3c536..07b2624b0 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -4816,8 +4816,8 @@ static inline void gen_intermediate_code_internal(TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
do {
- if (unlikely(env->breakpoints)) {
- for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (dc->pc != pc_start)
save_state(dc, cpu_cond);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index ad631e9f5..78e7f9326 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -621,8 +621,9 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
#endif
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
- if (i & 1) {
+ if (real_args & 1) {
*gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
+ real_args++;
}
#endif
#ifdef TCG_TARGET_WORDS_BIGENDIAN
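The switch from i to real_args matters once 32-bit and 64-bit arguments are mixed on a 32-bit host: every 64-bit argument occupies two emitted parameter slots, so the emitted-slot counter real_args can drift away from the source argument index i. As an illustration, for a helper taking (i64 a, i32 b, i64 c), a fills slots 0 and 1 and b slot 2; c then sits at source index i = 2, which is even, so the old check inserted no padding, yet its first emitted slot is real_args = 3, which is odd and does need a dummy slot for the pair to stay aligned. Incrementing real_args when the dummy is emitted keeps the counter consistent for the arguments that follow.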
diff --git a/usb-stub.c b/usb-stub.c
index 29aee5dad..52105c395 100644
--- a/usb-stub.c
+++ b/usb-stub.c
@@ -30,6 +30,8 @@
* THE SOFTWARE.
*/
+#include "qemu-common.h"
+#include "console.h"
#include "hw/usb.h"
void usb_host_info(void)
diff --git a/vl.c b/vl.c
index 46733e904..0eea3b327 100644
--- a/vl.c
+++ b/vl.c
@@ -1673,15 +1673,6 @@ static void try_to_rearm_timer(void *opaque)
} while ((len == -1 && errno == EINTR) || len > 0);
#endif
- /* vm time timers */
- if (vm_running && likely(!(cur_cpu && (cur_cpu->singlestep_enabled & SSTEP_NOTIMER))))
- qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
- qemu_get_clock(vm_clock));
-
- /* real time timers */
- qemu_run_timers(&active_timers[QEMU_TIMER_REALTIME],
- qemu_get_clock(rt_clock));
-
if (t->flags & ALARM_FLAG_EXPIRED) {
alarm_timer->flags &= ~ALARM_FLAG_EXPIRED;
qemu_rearm_alarm_timer(alarm_timer);
@@ -3763,6 +3754,16 @@ void main_loop_wait(int timeout)
}
#endif
+ /* vm time timers */
+ if (vm_running && (!cur_cpu
+ || likely(!(cur_cpu->singlestep_enabled & SSTEP_NOTIMER))))
+ qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL],
+ qemu_get_clock(vm_clock));
+
+ /* real time timers */
+ qemu_run_timers(&active_timers[QEMU_TIMER_REALTIME],
+ qemu_get_clock(rt_clock));
+
/* Check bottom-halves last in case any of the earlier events triggered
them. */
qemu_bh_poll();
diff --git a/vnc.c b/vnc.c
index 01bcc8d2b..f663b385d 100644
--- a/vnc.c
+++ b/vnc.c
@@ -321,7 +321,7 @@ static void vnc_dpy_resize(DisplayState *ds, int w, int h)
}
memset(vs->dirty_row, 0xFF, sizeof(vs->dirty_row));
- memset(vs->old_data, 42, vs->ds->linesize * vs->ds->height);
+ memset(vs->old_data, 42, ds_get_linesize(vs->ds) * ds_get_height(vs->ds));
}
/* fastest code */
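The vnc.c and vnchextile.h hunks below replace direct DisplayState field accesses with ds_get_*() helpers, presumably declared alongside the console changes in this merge. The helpers themselves are not part of this excerpt; the sketch below only shows the shape the callers assume, as thin inline getters:

    /* Assumed shape of the accessors; illustrative, not the actual
       console.h definitions. */
    static inline uint8_t *ds_get_data(DisplayState *ds)    { return ds->data; }
    static inline int ds_get_linesize(DisplayState *ds)     { return ds->linesize; }
    static inline int ds_get_width(DisplayState *ds)        { return ds->width; }
    static inline int ds_get_height(DisplayState *ds)       { return ds->height; }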
@@ -414,10 +414,10 @@ static void send_framebuffer_update_raw(VncState *vs, int x, int y, int w, int h
vnc_framebuffer_update(vs, x, y, w, h, 0);
- row = vs->ds->data + y * vs->ds->linesize + x * vs->depth;
+ row = ds_get_data(vs->ds) + y * ds_get_linesize(vs->ds) + x * vs->depth;
for (i = 0; i < h; i++) {
vs->write_pixels(vs, row, w * vs->depth);
- row += vs->ds->linesize;
+ row += ds_get_linesize(vs->ds);
}
}
@@ -495,7 +495,7 @@ static void vnc_copy(DisplayState *ds, int src_x, int src_y, int dst_x, int dst_
uint8_t *dst_row;
char *old_row;
int y = 0;
- int pitch = ds->linesize;
+ int pitch = ds_get_linesize(ds);
VncState *vs = ds->opaque;
vnc_update_client(vs);
@@ -505,11 +505,11 @@ static void vnc_copy(DisplayState *ds, int src_x, int src_y, int dst_x, int dst_
pitch = -pitch;
}
- src = (ds->linesize * (src_y + y) + vs->depth * src_x);
- dst = (ds->linesize * (dst_y + y) + vs->depth * dst_x);
+ src = (ds_get_linesize(ds) * (src_y + y) + vs->depth * src_x);
+ dst = (ds_get_linesize(ds) * (dst_y + y) + vs->depth * dst_x);
- src_row = ds->data + src;
- dst_row = ds->data + dst;
+ src_row = ds_get_data(ds) + src;
+ dst_row = ds_get_data(ds) + dst;
old_row = vs->old_data + dst;
for (y = 0; y < h; y++) {
@@ -563,7 +563,7 @@ static void vnc_update_client(void *opaque)
/* Walk through the dirty map and eliminate tiles that
really aren't dirty */
- row = vs->ds->data;
+ row = ds_get_data(vs->ds);
old_row = vs->old_data;
for (y = 0; y < vs->height; y++) {
@@ -575,7 +575,7 @@ static void vnc_update_client(void *opaque)
ptr = row;
old_ptr = (char*)old_row;
- for (x = 0; x < vs->ds->width; x += 16) {
+ for (x = 0; x < ds_get_width(vs->ds); x += 16) {
if (memcmp(old_ptr, ptr, 16 * vs->depth) == 0) {
vnc_clear_bit(vs->dirty_row[y], (x / 16));
} else {
@@ -588,8 +588,8 @@ static void vnc_update_client(void *opaque)
}
}
- row += vs->ds->linesize;
- old_row += vs->ds->linesize;
+ row += ds_get_linesize(vs->ds);
+ old_row += ds_get_linesize(vs->ds);
}
if (!has_dirty) {
@@ -918,7 +918,7 @@ static void check_pointer_type_change(VncState *vs, int absolute)
vnc_write_u8(vs, 0);
vnc_write_u16(vs, 1);
vnc_framebuffer_update(vs, absolute, 0,
- vs->ds->width, vs->ds->height, -257);
+ ds_get_width(vs->ds), ds_get_height(vs->ds), -257);
vnc_flush(vs);
}
vs->absolute = absolute;
@@ -941,8 +941,8 @@ static void pointer_event(VncState *vs, int button_mask, int x, int y)
dz = 1;
if (vs->absolute) {
- kbd_mouse_event(x * 0x7FFF / (vs->ds->width - 1),
- y * 0x7FFF / (vs->ds->height - 1),
+ kbd_mouse_event(x * 0x7FFF / (ds_get_width(vs->ds) - 1),
+ y * 0x7FFF / (ds_get_height(vs->ds) - 1),
dz, buttons);
} else if (vs->has_pointer_type_change) {
x -= 0x7FFF;
@@ -1106,25 +1106,25 @@ static void framebuffer_update_request(VncState *vs, int incremental,
int x_position, int y_position,
int w, int h)
{
- if (x_position > vs->ds->width)
- x_position = vs->ds->width;
- if (y_position > vs->ds->height)
- y_position = vs->ds->height;
- if (x_position + w >= vs->ds->width)
- w = vs->ds->width - x_position;
- if (y_position + h >= vs->ds->height)
- h = vs->ds->height - y_position;
+ if (x_position > ds_get_width(vs->ds))
+ x_position = ds_get_width(vs->ds);
+ if (y_position > ds_get_height(vs->ds))
+ y_position = ds_get_height(vs->ds);
+ if (x_position + w >= ds_get_width(vs->ds))
+ w = ds_get_width(vs->ds) - x_position;
+ if (y_position + h >= ds_get_height(vs->ds))
+ h = ds_get_height(vs->ds) - y_position;
int i;
vs->need_update = 1;
if (!incremental) {
- char *old_row = vs->old_data + y_position * vs->ds->linesize;
+ char *old_row = vs->old_data + y_position * ds_get_linesize(vs->ds);
for (i = 0; i < h; i++) {
vnc_set_bits(vs->dirty_row[y_position + i],
- (vs->ds->width / 16), VNC_DIRTY_WORDS);
- memset(old_row, 42, vs->ds->width * vs->depth);
- old_row += vs->ds->linesize;
+ (ds_get_width(vs->ds) / 16), VNC_DIRTY_WORDS);
+ memset(old_row, 42, ds_get_width(vs->ds) * vs->depth);
+ old_row += ds_get_linesize(vs->ds);
}
}
}
@@ -1134,7 +1134,7 @@ static void send_ext_key_event_ack(VncState *vs)
vnc_write_u8(vs, 0);
vnc_write_u8(vs, 0);
vnc_write_u16(vs, 1);
- vnc_framebuffer_update(vs, 0, 0, vs->ds->width, vs->ds->height, -258);
+ vnc_framebuffer_update(vs, 0, 0, ds_get_width(vs->ds), ds_get_height(vs->ds), -258);
vnc_flush(vs);
}
@@ -1497,10 +1497,10 @@ static int protocol_client_init(VncState *vs, uint8_t *data, size_t len)
char buf[1024];
int size;
- vs->width = vs->ds->width;
- vs->height = vs->ds->height;
- vnc_write_u16(vs, vs->ds->width);
- vnc_write_u16(vs, vs->ds->height);
+ vs->width = ds_get_width(vs->ds);
+ vs->height = ds_get_height(vs->ds);
+ vnc_write_u16(vs, ds_get_width(vs->ds));
+ vnc_write_u16(vs, ds_get_height(vs->ds));
pixel_format_message(vs);
@@ -2116,7 +2116,7 @@ static void vnc_connect(VncState *vs)
vnc_write(vs, "RFB 003.008\n", 12);
vnc_flush(vs);
vnc_read_when(vs, protocol_version, 12);
- memset(vs->old_data, 0, vs->ds->linesize * vs->ds->height);
+ memset(vs->old_data, 0, ds_get_linesize(vs->ds) * ds_get_height(vs->ds));
memset(vs->dirty_row, 0xFF, sizeof(vs->dirty_row));
vs->has_resize = 0;
vs->has_hextile = 0;
diff --git a/vnchextile.h b/vnchextile.h
index eb05feb60..e0a62bbb0 100644
--- a/vnchextile.h
+++ b/vnchextile.h
@@ -13,7 +13,7 @@ static void CONCAT(send_hextile_tile_, NAME)(VncState *vs,
void *last_fg_,
int *has_bg, int *has_fg)
{
- uint8_t *row = (vs->ds->data + y * vs->ds->linesize + x * vs->depth);
+ uint8_t *row = (ds_get_data(vs->ds) + y * ds_get_linesize(vs->ds) + x * vs->depth);
pixel_t *irow = (pixel_t *)row;
int j, i;
pixel_t *last_bg = (pixel_t *)last_bg_;
@@ -57,7 +57,7 @@ static void CONCAT(send_hextile_tile_, NAME)(VncState *vs,
}
if (n_colors > 2)
break;
- irow += vs->ds->linesize / sizeof(pixel_t);
+ irow += ds_get_linesize(vs->ds) / sizeof(pixel_t);
}
if (n_colors > 1 && fg_count > bg_count) {
@@ -105,7 +105,7 @@ static void CONCAT(send_hextile_tile_, NAME)(VncState *vs,
n_data += 2;
n_subtiles++;
}
- irow += vs->ds->linesize / sizeof(pixel_t);
+ irow += ds_get_linesize(vs->ds) / sizeof(pixel_t);
}
break;
case 3:
@@ -161,7 +161,7 @@ static void CONCAT(send_hextile_tile_, NAME)(VncState *vs,
n_data += 2;
n_subtiles++;
}
- irow += vs->ds->linesize / sizeof(pixel_t);
+ irow += ds_get_linesize(vs->ds) / sizeof(pixel_t);
}
/* A SubrectsColoured subtile invalidates the foreground color */
@@ -198,7 +198,7 @@ static void CONCAT(send_hextile_tile_, NAME)(VncState *vs,
} else {
for (j = 0; j < h; j++) {
vs->write_pixels(vs, row, w * vs->depth);
- row += vs->ds->linesize;
+ row += ds_get_linesize(vs->ds);
}
}
}